gnunetbib

Bibliography (BibTeX, based on AnonBib)
Log | Files | Refs | README | LICENSE

commit 9ca6ed63f812efb18db65b601be7c4721f675510
parent 7c5b4718519ad8277acbc3286ffec43b57185fd7
Author: Nils Gillmann <ng0@n0.is>
Date:   Thu,  4 Oct 2018 23:41:35 +0000

fix extraneous period errors

Signed-off-by: Nils Gillmann <ng0@n0.is>

Diffstat:
Mgnunetbib.bib | 321+++++++++++++++++++++++++++++--------------------------------------------------
1 file changed, 117 insertions(+), 204 deletions(-)

diff --git a/gnunetbib.bib b/gnunetbib.bib @@ -201,7 +201,7 @@ school = {Technische Universit{\"a}t M{\"u}nchen}, type = {PhD}, address = {M{\"u}nchen}, - abstract = {This thesis provides the design and implementation of a secure and resilient communication infrastructure for decentralized peer-to-peer networks. The proposed communication infrastructure tries to overcome limitations to unrestricted communication on today{\textquoteright}s Internet and has the goal of re-establishing unhindered communication between users. With the GNU name system, we present a fully decentralized, resilient, and privacy-preserving alternative to DNS and existing security infrastructures. }, + abstract = {This thesis provides the design and implementation of a secure and resilient communication infrastructure for decentralized peer-to-peer networks. The proposed communication infrastructure tries to overcome limitations to unrestricted communication on today{\textquoteright}s Internet and has the goal of re-establishing unhindered communication between users. With the GNU name system, we present a fully decentralized, resilient, and privacy-preserving alternative to DNS and existing security infrastructures}, keywords = {Communication, GNU Name System, GNUnet, P2P, resilience}, isbn = {3-937201-45-9}, doi = {10.2313/NET-2015-02-1}, @@ -254,7 +254,7 @@ We present a hybrid PIR protocol that combines two PIR protocols, one from each url = {http://dx.doi.org/10.1007/978-3-319-08506-7_4}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/pir_0.pdf}, author = {Devet, Casey and Goldberg, Ian}, - editor = {De Cristofaro, Emiliano and Murdoch, StevenJ.} + editor = {De Cristofaro, Emiliano and Murdoch, StevenJ} } @conference {CADET, title = {CADET: Confidential Ad-hoc Decentralized End-to-End Transport}, @@ -362,7 +362,7 @@ communication systems enable the reconstruction of user behavioral profiles. 
Pro url = {http://dx.doi.org/10.1007/978-3-319-08506-7_11}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/anonymity_and_cover_traffic.pdf}, author = {Oya, Simon and Troncoso, Carmela and P{\'e}rez-Gonz{\'a}lez, Fernando}, - editor = {De Cristofaro, Emiliano and Murdoch, StevenJ.} + editor = {De Cristofaro, Emiliano and Murdoch, StevenJ} } @article {private_presence_service2014, title = {DP5: A Private Presence Service}, @@ -393,7 +393,7 @@ as an evaluation of its performance}, url = {http://dx.doi.org/10.1007/978-3-319-08506-7_3}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/obfuscation_osn.pdf}, author = {Chen, Terence and Boreli, Roksana and Kaafar, Mohamed-Ali and Friedman, Arik}, - editor = {De Cristofaro, Emiliano and Murdoch, StevenJ.} + editor = {De Cristofaro, Emiliano and Murdoch, StevenJ} } @mastersthesis {SupritiSinghMasterThesis2014, title = {Experimental comparison of Byzantine fault tolerant distributed hash tables}, @@ -428,7 +428,7 @@ distributed encryption scheme that is much more efficient for small plaintext do url = {http://dx.doi.org/10.1007/978-3-319-08506-7_7}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/foward_secure_encryption.pdf}, author = {Lueks, Wouter and Hoepman, Jaap-Henk and Kursawe, Klaus}, - editor = {De Cristofaro, Emiliano and Murdoch, StevenJ.} + editor = {De Cristofaro, Emiliano and Murdoch, StevenJ} } @mastersthesis {ma_kirsch_2014_0, title = {Improved Kernel-Based Port-Knocking in Linux}, @@ -492,8 +492,7 @@ The design is evaluated with the help of simulation and a realistic implementati abstract = {Linear programming (LP) has numerous applications in different fields. In some scenarios, e.g. supply chain master planning (SCMP), the goal is solving linear programs involving multiple parties reluctant to sharing their private information. In this case, methods from the area of secure multi-party computation (SMC) can be used. 
Secure multi-party versions of LP solvers have been known to be impractical due to high communication complexity. To overcome this, solutions based on problem transformation have been put forward. In this thesis, one such algorithm, proposed by Dreier and Kerschbaum, is discussed, implemented, and evaluated with respect to numerical stability and scalability. Results -obtained with different parameter sets and different test cases are presented and some problems are exposed. It was found that the algorithm has some unforeseen limitations, particularly when implemented within the bounds of normal primitive data types. Random numbers generated during the protocol have to be extremely small so as to not cause problems with overflows after a series of multiplications. The number of peers participating additionally limits the size of numbers. A positive finding was that results produced when none of the aforementioned problems occur are generally quite accurate. We discuss a few possibilities to overcome some of the problems with an implementation using arbitrary precision numbers. -}, +obtained with different parameter sets and different test cases are presented and some problems are exposed. It was found that the algorithm has some unforeseen limitations, particularly when implemented within the bounds of normal primitive data types. Random numbers generated during the protocol have to be extremely small so as to not cause problems with overflows after a series of multiplications. The number of peers participating additionally limits the size of numbers. A positive finding was that results produced when none of the aforementioned problems occur are generally quite accurate. 
We discuss a few possibilities to overcome some of the problems with an implementation using arbitrary precision numbers}, keywords = {GNUnet, linear programming, secure multi-party computation}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/arias2014bs.pdf}, author = {Raphael Arias} @@ -530,8 +529,7 @@ In this paper we explore the implications of differential privacy when the indis school = {University of Amsterdam}, type = {Master{\textquoteright}s}, address = {Amsterdam}, - abstract = {This work presents the design of a social messaging service for the GNUnet peer-to-peer framework that offers scalability, extensibility, and end-to-end encrypted communication. The scalability property is achieved through multicast message delivery, while extensibility is made possible by using PSYC (Protocol for SYnchronous Communication), which provides an extensible RPC (Remote Procedure Call) syntax that can evolve over time without having to upgrade the software on all nodes in the network. Another key feature provided by the PSYC layer are stateful multicast channels, which are used to store e.g. user profiles. End-to-end encrypted communication is provided by the mesh service of GNUnet, upon which the multicast channels are built. Pseudonymous users and social places in the system have cryptographical identities --- identified by their public key --- these are mapped to human memorable names using GNS (GNU Name System), where each pseudonym has a zone pointing to its places. -}, + abstract = {This work presents the design of a social messaging service for the GNUnet peer-to-peer framework that offers scalability, extensibility, and end-to-end encrypted communication. 
The scalability property is achieved through multicast message delivery, while extensibility is made possible by using PSYC (Protocol for SYnchronous Communication), which provides an extensible RPC (Remote Procedure Call) syntax that can evolve over time without having to upgrade the software on all nodes in the network. Another key feature provided by the PSYC layer are stateful multicast channels, which are used to store e.g. user profiles. End-to-end encrypted communication is provided by the mesh service of GNUnet, upon which the multicast channels are built. Pseudonymous users and social places in the system have cryptographical identities --- identified by their public key --- these are mapped to human memorable names using GNS (GNU Name System), where each pseudonym has a zone pointing to its places}, keywords = {GNS, GNUnet, PSYC, social networks}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/gnunet-psyc.pdf}, author = {Gabor X Toth} @@ -546,8 +544,7 @@ In this paper we explore the implications of differential privacy when the indis address = {La Rochelle, France}, abstract = {A central problem on the Internet today is that key infrastructure for security is concentrated in a few places. This is particularly true in the areas of naming and public key infrastructure. Secret services and other government organizations can use this fact to block access to information or monitor communications. One of the most popular and easy to perform techniques is to make information on the Web inaccessible by censoring or manipulating the Domain Name System (DNS). With the introduction of DNSSEC, the DNS is furthermore posed to become an alternative PKI to the failing X.509 CA system, further cementing the power of those in charge of operating DNS. -This paper maps the design space and gives design requirements for censorship resistant name systems. 
We survey the existing range of ideas for the realization of such a system and discuss the challenges these systems have to overcome in practice. Finally, we present the results from a survey on browser usage, which supports the idea that delegation should be a key ingredient in any censorship resistant name system. -}, +This paper maps the design space and gives design requirements for censorship resistant name systems. We survey the existing range of ideas for the realization of such a system and discuss the challenges these systems have to overcome in practice. Finally, we present the results from a survey on browser usage, which supports the idea that delegation should be a key ingredient in any censorship resistant name system}, keywords = {DNS, GNS, GNU Name System, GNUnet, PKI, SDSI, Zooko{\textquoteright}s Triangle}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/fps2013wachs.pdf}, author = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff} @@ -641,8 +638,7 @@ This thesis presents the design and implementation of a setup for utilizing SPDY Furthermore, we focus on the SPDY server push feature which allows servers to send multiple responses to a single request for reducing latency and traffic on loading web pages. We propose a prediction algorithm for employing push at SPDY servers and proxies. The algorithm makes predictions based on previous requests and responses and initially does not know anything about the data which it will push. -This thesis includes extensive measurement data highlighting the possible benefits of using SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks experiencing latency or loss. Moreover, the real profit from using SPDY within the Tor network on loading some of the most popular web sites is presented. Finally, evaluations of the proposed push prediction algorithm are given for emphasizing the possible gain of employing it at SPDY reverse and forward proxies. 
-}, +This thesis includes extensive measurement data highlighting the possible benefits of using SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks experiencing latency or loss. Moreover, the real profit from using SPDY within the Tor network on loading some of the most popular web sites is presented. Finally, evaluations of the proposed push prediction algorithm are given for emphasizing the possible gain of employing it at SPDY reverse and forward proxies}, keywords = {anonymity, HTTP, privacy, spdy, Tor}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/uzunov2013torspdy.pdf}, author = {Andrey Uzunov} @@ -718,8 +714,7 @@ This thesis includes extensive measurement data highlighting the possible benefi month = feb, address = {Bonaire}, abstract = {Tor, an anonymity network formed by volunteer nodes, uses the estimated bandwidth of the nodes as a central feature of its path selection algorithm. The current load on nodes is not considered in this algorithm, however, and we ob- -serve that some nodes persist in being under-utilized or congested. This can degrade the network{\textquoteright}s performance, discourage Tor adoption, and consequently reduce the size of Tor{\textquoteright}s anonymity set. In an effort to reduce congestion and improve load balancing, we propose a congestion-aware path selection algorithm. Using latency as an indicator of congestion, clients use opportunistic and lightweight active measurements to evaluate the congestion state of nodes, and reject nodes that appear congested. Through experiments conducted on the live Tor network, we verify our hypothesis that clients can infer congestion using latency and show that congestion-aware path selection can improve performance. -}, +serve that some nodes persist in being under-utilized or congested. 
This can degrade the network{\textquoteright}s performance, discourage Tor adoption, and consequently reduce the size of Tor{\textquoteright}s anonymity set. In an effort to reduce congestion and improve load balancing, we propose a congestion-aware path selection algorithm. Using latency as an indicator of congestion, clients use opportunistic and lightweight active measurements to evaluate the congestion state of nodes, and reject nodes that appear congested. Through experiments conducted on the live Tor network, we verify our hypothesis that clients can infer congestion using latency and show that congestion-aware path selection can improve performance}, keywords = {algorithms, Tor, volunteer nodes}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2712\%20-\%20Congestion-aware\%20Path\%20Selection\%20for\%20Tor.pdf}, author = {Tao Wang and Kevin Bauer and Clara Forero and Ian Goldberg} @@ -766,8 +761,7 @@ The idea behind our approach is to convert regular expressions into finite autom There exist several possible applications for this general approach of decentralized regular expression evaluation. However, in this thesis we focus on the application of discovering users that are willing to provide network access using a specified protocol to a particular destination. -We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster. -}, +We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster}, keywords = {DFA, distributed hash table, GNUnet, NFA, regular expressions, search}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/szengel2012ms.pdf}, author = {Maximilian Szengel} @@ -828,8 +822,7 @@ of the protocol. 
Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates. This paper includes experimental results that demonstrate the viability, efficiency and -accuracy of the protocol. -}, +accuracy of the protocol}, keywords = {GNUnet, network security, network size estimation, peer-to-peer networking}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/nse-techreport.pdf}, author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff} @@ -843,8 +836,7 @@ accuracy of the protocol. publisher = {Springer Verlag}, organization = {Springer Verlag}, address = {Prague, CZ}, - abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for performance tuning of P2P routing algorithms. This paper introduces and evaluates a new efficient method for participants in an unstructured P2P network to establish the size of the overall network. The presented method is highly efficient, propagating information about the current size of the network to all participants using O(|E|) operations where |E| is the number of edges in the network. Afterwards, all nodes have the same network size estimate, which can be made arbitrarily accurate by averaging results from multiple rounds of the protocol. Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates. This paper includes experimental results that demonstrate the viability, efficiency and accuracy of the protocol. -}, + abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for performance tuning of P2P routing algorithms. This paper introduces and evaluates a new efficient method for participants in an unstructured P2P network to establish the size of the overall network. 
The presented method is highly efficient, propagating information about the current size of the network to all participants using O(|E|) operations where |E| is the number of edges in the network. Afterwards, all nodes have the same network size estimate, which can be made arbitrarily accurate by averaging results from multiple rounds of the protocol. Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates. This paper includes experimental results that demonstrate the viability, efficiency and accuracy of the protocol}, keywords = {byzantine fault tolerance, GNUnet, network size estimation, proof of work}, url = {http://grothoff.org/christian/rrsize2012.pdf}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/paper-ifip.pdf}, @@ -894,8 +886,7 @@ In this paper, we propose privacy-preserving location-based matching as a fundam address = {San Francisco, CA, USA}, abstract = {Popular anonymous communication systems often require sending packets through a sequence of relays on dilated paths for strong anonymity protection. As a result, increased end-to-end latency renders such systems inadequate for the majority of Internet users who seek an intermediate level of anonymity protection while using latency-sensitive applications, such as Web applications. This paper serves to bridge the gap between communication systems that provide strong anonymity protection but with intolerable latency and non-anonymous communication systems by considering a new design space for the setting. More specifically, we explore how to achieve near-optimal latency while achieving an intermediate level of anonymity with a weaker yet practical adversary model (i.e., protecting an end-host{\textquoteright}s identity and location from servers) such that users can choose between the level of anonymity and usability. 
We propose Lightweight Anonymity and Privacy (LAP), an efficient network-based solution featuring lightweight path establishment and stateless communication, by concealing an end-host{\textquoteright}s topological location to enhance anonymity against -remote tracking. To show practicality, we demonstrate that LAP can work on top of the current Internet and proposed future Internet architectures. -}, +remote tracking. To show practicality, we demonstrate that LAP can work on top of the current Internet and proposed future Internet architectures}, keywords = {anonymous communication anonymity protection, LAP}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/LAP\%3A\%20Lightweight\%20Anonymity\%20and\%20Privacy.pdf}, author = {Hsu-Chun Hsiao and Tiffany Hyun-Jin Kim and Adrian Perrig and Akira Yamada and Sam Nelson and Marco Gruteser and Wei Ming} @@ -939,8 +930,7 @@ paths and by using LASTor to visit the top 200 websites from several geographically-distributed end-hosts, we show that, in comparison to the default Tor client, LASTor reduces median latencies by 25\% while also reducing the false negative rate of -not detecting a potential snooping AS from 57\% to 11\%. -}, +not detecting a potential snooping AS from 57\% to 11\%}, keywords = {anonymous communication, as, autonomous system, Tor}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/LASTor\%3A\%20A\%20Low-Latency\%20AS-Aware\%20Tor\%20Client.pdf}, author = {Masoud Akhoondi and Curtis Yu and Harsha V. Madhyastha} @@ -1000,8 +990,7 @@ In this master thesis we introduce Monkey, a new tool that provides a solution f Existing solutions for NAT traversal can remedy the restrictions, but still there is a fraction of home users which lack support of it, especially when it comes to TCP. We present a framework for traversing NAT routers by exploiting their built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based applications. 
While this does not work in every scenario, it significantly improves the success chance without requiring any user interaction at all. To demonstrate the framework, we show -a small test setup with laptop computers and home NAT routers. -}, +a small test setup with laptop computers and home NAT routers}, keywords = {FTP-ALG, NAT}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WHW_12-NTALG.pdf}, author = {Wander, M. and Holzapfel, S. and Wacker, A. and Weis, T.} @@ -1027,9 +1016,7 @@ a small test setup with laptop computers and home NAT routers. abstract = {We consider the setting of HTTP traffic over encrypted tunnels, as used to conceal the identity of websites visited by a user. It is well known that traffic analysis (TA) attacks can accurately identify the website a user visits despite the use of encryption, and previous work has looked at specific attack/countermeasure pairings. We provide the first comprehensive analysis of general-purpose TA countermeasures. We show that nine known countermeasures are vulnerable to simple attacks that exploit coarse features of traffic (e.g., total time and bandwidth). The considered countermeasures include ones like those standardized by TLS, SSH, and IPsec, and even more complex ones like the traffic morphing scheme of Wright et al. As just one of our results, we show that despite the use of traffic morphing, one can use only total upstream and downstream bandwidth to identify {\textemdash}with 98\% accuracy{\textemdash} which of two websites was visited. One implication of what we find is that, in the context of website identification, it is unlikely that bandwidth-efficient, general- -purpose TA countermeasures can ever provide the type of security targeted in prior work. 
- -}, +purpose TA countermeasures can ever provide the type of security targeted in prior work}, keywords = {encrypted traffic, machine learning, padding, privacy, traffic analysis countermeasures}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Peek-a-Boo\%2C\%20I\%20Still\%20See\%20You\%3A\%20Why\%20Efficient\%20Traffic\%20Analysis\%20Countermeasures\%20Fail.pdf}, author = {Kevin P. Dyer and Scott Coull and Thomas Ristenpart and Thomas Shrimpton} @@ -1088,8 +1075,7 @@ developed, ensuring query load balancing and fault tolerance, respectively. Our range query processing efficiency, access load balancing, and fault tolerance, with low replication overheads. The significance of Saturn is not only that it effectively tackles all three issues together{\textemdash}i.e., supporting range queries, ensuring load balancing, and providing fault tolerance over DHTs{\textemdash}but also that it can be applied on top of any order-preserving DHT enabling it to dynamically -handle replication and, thus, to trade off replication costs for fair load distribution and fault tolerance. -}, +handle replication and, thus, to trade off replication costs for fair load distribution and fault tolerance}, keywords = {distributed hash table, load balancing, range queries, Saturn}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/saturn-range-dht.pdf}, author = {Theoni Pitoura and Nikos Ntarmos and Peter Triantafillou} @@ -1158,9 +1144,7 @@ handle replication and, thus, to trade off replication costs for fair load distr month = jul, address = {Waterloo, Canada}, abstract = {We give a critical analysis of the system-wide anonymity metric of Edman et al. [3], which is based on the permanent value of a doubly-stochastic matrix. By providing an intuitive understanding of the permanent of such a matrix, we show that a metric that looks no further than this composite value is at best a rough indicator of anonymity. 
We identify situations where its inaccuracy is acute, and reveal a better anonymity indicator. Also, by constructing an information-preserving embedding of a smaller class of attacks into the wider class for which this metric was proposed, we show that this metric fails to possess desirable -generalization properties. Finally, we present a new anonymity metric that does not exhibit these shortcomings. Our new metric is accurate as well as general. - -}, +generalization properties. Finally, we present a new anonymity metric that does not exhibit these shortcomings. Our new metric is accurate as well as general}, keywords = {combinatorial matrix theory, probabilistic attacks, system-wide anonymity metric}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/PETS\%2711\%20-\%20An\%20Accurate\%20System-Wide\%20Anonymity\%20Metric\%20for\%20Probabilistic\%20Attacks.pdf}, author = {Rajiv Bagai and Huabo Lu and Rong Li and Bin Tang} @@ -1172,7 +1156,7 @@ generalization properties. Finally, we present a new anonymity metric that does publisher = {USENIX Association}, organization = {USENIX Association}, address = {San Francisco, California}, - abstract = {This paper presents details on the design and implementation of a scalable framework for evaluating peer-to-peer protocols. Unlike systems based on simulation, emulation-based systems enable the experimenter to obtain data that reflects directly on the concrete implementation in much greater detail. This paper argues that emulation is a better model for experiments with peer-to-peer protocols since it can provide scalability and high flexibility while eliminating the cost of moving from experimentation to deployment. We discuss our unique experience with large-scale emulation using the GNUnet peer-to-peer framework and provide experimental results to support these claims. 
}, + abstract = {This paper presents details on the design and implementation of a scalable framework for evaluating peer-to-peer protocols. Unlike systems based on simulation, emulation-based systems enable the experimenter to obtain data that reflects directly on the concrete implementation in much greater detail. This paper argues that emulation is a better model for experiments with peer-to-peer protocols since it can provide scalability and high flexibility while eliminating the cost of moving from experimentation to deployment. We discuss our unique experience with large-scale emulation using the GNUnet peer-to-peer framework and provide experimental results to support these claims }, keywords = {distributed hash table, emulation, GNUnet, scalability, security analysis}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/cset2011.pdf}, author = {Nathan S Evans and Christian Grothoff} @@ -1184,8 +1168,7 @@ generalization properties. Finally, we present a new anonymity metric that does month = feb, address = {St. Lucia}, abstract = {Anonymous blacklisting schemes allow online service providers to prevent future anonymous access by abusive users while preserving the privacy of all anonymous users (both abusive and non-abusive). The first scheme proposed for this purpose was Nymble, an extremely efficient scheme based only on symmetric primitives; however, Nymble relies on trusted third parties who can collude to de-anonymize users of the scheme. Two recently proposed schemes, Nymbler and Jack, reduce the trust placed in these third parties at the expense of using less-efficient asymmetric crypto primitives. We present BNymble, a scheme which matches the anonymity guarantees of Nymbler and Jack while (nearly) maintaining the efficiency of the original Nymble. 
The key insight of -BNymble is that we can achieve the anonymity goals of these more recent schemes by replacing only the infrequent {\textquotedblleft}User Registration{\textquotedblright} protocol from Nymble with asymmetric primitives. We prove the security of BNymble, and report on its efficiency. -}, +BNymble is that we can achieve the anonymity goals of these more recent schemes by replacing only the infrequent {\textquotedblleft}User Registration{\textquotedblright} protocol from Nymble with asymmetric primitives. We prove the security of BNymble, and report on its efficiency}, keywords = {anonymous access, anonymous blacklisting, BNymble}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2711\%20-\%20BNymble.pdf}, author = {Peter Lofgren and Nicholas J. Hopper} @@ -1198,8 +1181,7 @@ BNymble is that we can achieve the anonymity goals of these more recent schemes publisher = {ACM}, organization = {ACM}, address = {Chicago, IL, United States}, - abstract = {Tor is a network designed for low-latency anonymous communications. Tor clients form circuits through relays that are listed in a public directory, and then relay their encrypted traffic through these circuits. This indirection makes it difficult for a local adversary to determine with whom a particular Tor user is communicating. In response, some local adversaries restrict access to Tor by blocking each of the publicly listed relays. To deal with such an adversary, Tor uses bridges, which are unlisted relays that can be used as alternative entry points into the Tor network. Unfortunately, issues with Tor{\textquoteright}s bridge implementation make it easy to discover large numbers of bridges. An adversary that hoards this information may use it to determine when each bridge is online over time. If a bridge operator also browses with Tor on the same machine, this information may be sufficient to deanonymize him. We present BridgeSPA as a method to mitigate this issue. 
A client using BridgeSPA relies on innocuous single packet authorization (SPA) to present a time-limited key to a bridge. Before this authorization takes place, the bridge will not reveal whether it is online. We have implemented BridgeSPA as a working proof-of-concept, which is available under an open-source licence. -}, + abstract = {Tor is a network designed for low-latency anonymous communications. Tor clients form circuits through relays that are listed in a public directory, and then relay their encrypted traffic through these circuits. This indirection makes it difficult for a local adversary to determine with whom a particular Tor user is communicating. In response, some local adversaries restrict access to Tor by blocking each of the publicly listed relays. To deal with such an adversary, Tor uses bridges, which are unlisted relays that can be used as alternative entry points into the Tor network. Unfortunately, issues with Tor{\textquoteright}s bridge implementation make it easy to discover large numbers of bridges. An adversary that hoards this information may use it to determine when each bridge is online over time. If a bridge operator also browses with Tor on the same machine, this information may be sufficient to deanonymize him. We present BridgeSPA as a method to mitigate this issue. A client using BridgeSPA relies on innocuous single packet authorization (SPA) to present a time-limited key to a bridge. Before this authorization takes place, the bridge will not reveal whether it is online. 
We have implemented BridgeSPA as a working proof-of-concept, which is available under an open-source licence}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WPES\%2711\%20-\%20bridgeSPA.pdf}, author = {Rob Smits and Divam Jain and Sarah Pidcock and Ian Goldberg and Urs Hengartner} } @@ -1213,8 +1195,7 @@ BNymble is that we can achieve the anonymity goals of these more recent schemes address = {Chicago, IL, United States}, abstract = {Many users face surveillance of their Internet communications and a significant fraction suffer from outright blocking of certain destinations. Anonymous communication systems allow users to conceal the destinations they communicate with, but do not hide the fact that the users are using them. The mere use of such systems may invite suspicion, or access to them may be blocked. We therefore propose Cirripede, a system that can be used for unobservable communication with Internet destinations. Cirripede is designed to be deployed by ISPs; it intercepts connections from clients to innocent-looking destinations and redirects them to the true destination requested by the client. The communication is encoded in a way that is indistinguishable from normal communications to anyone without the master secret key, while public-key cryptography is used to eliminate the need for any secret information that must be shared with Cirripede users. -Cirripede is designed to work scalably with routers that handle large volumes of traffic while imposing minimal overhead on ISPs and not disrupting existing traffic. This allows Cirripede proxies to be strategically deployed at central locations, making access to Cirripede very difficult to block. We built a proof-of-concept implementation of Cirripede and performed a testbed evaluation of its performance properties. -}, +Cirripede is designed to work scalably with routers that handle large volumes of traffic while imposing minimal overhead on ISPs and not disrupting existing traffic. 
This allows Cirripede proxies to be strategically deployed at central locations, making access to Cirripede very difficult to block. We built a proof-of-concept implementation of Cirripede and performed a testbed evaluation of its performance properties},
 keywords = {censorship-resistance, unobservability},
 www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CCS\%2711\%20-\%20Cirripede.pdf},
 author = {Amir Houmansadr and Giang T. K. Nguyen and Matthew Caesar and Borisov, Nikita}
@@ -1249,8 +1230,7 @@ tual consistency. We call these types Convergent or Commutative Replicated Data
eration based, and provides a sufficient condition appropriate for each case. It
describes several useful CRDTs, including container data types supporting both add
and remove op-
erations with clean semantics, and more complex types such as graphs, monotonic DAGs,
-and sequences. It discusses some properties needed to implement non-trivial CRDTs.
-},
+and sequences. It discusses some properties needed to implement non-trivial CRDTs},
 keywords = {commutative operations, data replication, optimistic replication},
 isbn = {0249-6399},
 www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/crdt.pdf},
@@ -1277,8 +1257,7 @@ and sequences. It discusses some properties needed to implement non-trivial CRDT
 month = aug,
 address = {San Francisco, CA, USA},
 abstract = {We present decoy routing, a mechanism capable of circumventing common network filtering strategies. Unlike other circumvention techniques, decoy routing does not require a client to connect to a specific IP address (which
-is easily blocked) in order to provide circumvention. We show that if it is possible for a client to connect to any unblocked host/service, then decoy routing could be used to connect them to a blocked destination without cooperation from the host. 
This is accomplished by placing the circumvention service in the network itself {\textendash} where a single device could proxy traffic between a significant fraction of hosts {\textendash} instead of at the edge. -}, +is easily blocked) in order to provide circumvention. We show that if it is possible for a client to connect to any unblocked host/service, then decoy routing could be used to connect them to a blocked destination without cooperation from the host. This is accomplished by placing the circumvention service in the network itself {\textendash} where a single device could proxy traffic between a significant fraction of hosts {\textendash} instead of at the edge}, keywords = {decoy routing, Internet communication, network filter}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FOCI\%2711\%20-\%20Decoy\%20Routing\%3A\%20Toward\%20Unblockable\%20Internet\%20Communication.pdf}, author = {Josh Karlin and Daniel Ellard and Alden W. Jackson and Christine E. Jones and Greg Lauer and David P. Mankins and W. Timothy Strayer} @@ -1289,8 +1268,7 @@ is easily blocked) in order to provide circumvention. We show that if it is poss year = {2011}, month = jul, address = {Waterloo, Canada}, - abstract = {Tor is one of the most widely used privacy enhancing technologies for achieving online anonymity and resisting censorship. While conventional wisdom dictates that the level of anonymity offered by Tor increases as its user base grows, the most significant obstacle to Tor adoption continues to be its slow performance. We seek to enhance Tor{\textquoteright}s performance by offering techniques to control congestion and improve flow control, thereby reducing unnecessary delays. To reduce congestion, we first evaluate small fixed-size circuit windows and a dynamic circuit window that adaptively re-sizes in response to perceived congestion. 
While these solutions improve web page response times and require modification only to exit routers, they generally offer poor flow control and slower downloads relative to Tor{\textquoteright}s current design. To improve flow control while reducing congestion, we implement N23, an ATM-style per-link algorithm that allows Tor routers to explicitly cap their queue lengths and signal congestion via back-pressure. Our results show that N23 offers better congestion and flow control, resulting in improved web page response times and faster page loads compared to Tor{\textquoteright}s current design and other window-based approaches. We also argue that our proposals do not enable any new attacks on Tor users{\textquoteright} privacy. -}, + abstract = {Tor is one of the most widely used privacy enhancing technologies for achieving online anonymity and resisting censorship. While conventional wisdom dictates that the level of anonymity offered by Tor increases as its user base grows, the most significant obstacle to Tor adoption continues to be its slow performance. We seek to enhance Tor{\textquoteright}s performance by offering techniques to control congestion and improve flow control, thereby reducing unnecessary delays. To reduce congestion, we first evaluate small fixed-size circuit windows and a dynamic circuit window that adaptively re-sizes in response to perceived congestion. While these solutions improve web page response times and require modification only to exit routers, they generally offer poor flow control and slower downloads relative to Tor{\textquoteright}s current design. To improve flow control while reducing congestion, we implement N23, an ATM-style per-link algorithm that allows Tor routers to explicitly cap their queue lengths and signal congestion via back-pressure. 
Our results show that N23 offers better congestion and flow control, resulting in improved web page response times and faster page loads compared to Tor{\textquoteright}s current design and other window-based approaches. We also argue that our proposals do not enable any new attacks on Tor users{\textquoteright} privacy}, keywords = {congestion, DefenestraTor, online anonymity, performance, privacy enhancing technologies, Tor, Windows}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/PETS\%2711\%20-\%20DefenestraTor.pdf}, author = {Mashael AlSabah and Kevin Bauer and Ian Goldberg and Dirk Grunwald and Damon McCoy and Stefan Savage and Geoffrey M. Voelker} @@ -1315,8 +1293,7 @@ Our results also yield new separations between the local and global models of co month = aug, address = {San Francisco, CA, USA}, abstract = {Tor is one of the most widely-used privacy enhancing technologies for achieving online anonymity and resisting censorship. Simultaneously, Tor is also an evolving research network on which investigators perform experiments to improve the network{\textquoteright}s resilience to attacks and enhance its performance. Existing methods for studying Tor have included analytical modeling, simulations, small-scale network emulations, small-scale PlanetLab deployments, and measurement and analysis of the live Tor network. Despite the growing body of work concerning Tor, there is no widely accepted methodology for conducting Tor research in a manner that preserves realism while protecting live users{\textquoteright} privacy. In an effort to propose a standard, rigorous experimental framework for -conducting Tor research in a way that ensures safety and realism, we present the design of ExperimenTor, a large-scale Tor network emulation toolkit and testbed. We also report our early experiences with prototype testbeds currently deployed at four research institutions. 
-}, +conducting Tor research in a way that ensures safety and realism, we present the design of ExperimenTor, a large-scale Tor network emulation toolkit and testbed. We also report our early experiences with prototype testbeds currently deployed at four research institutions}, keywords = {experimentation, ExperimenTor, privacy enhancing technologies, Tor}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CSET\%2711\%20-\%20ExperimenTor.pdf}, author = {Kevin Bauer and Micah Sherr and Damon McCoy and Dirk Grunwald} @@ -1328,8 +1305,7 @@ conducting Tor research in a way that ensures safety and realism, we present the month = dec, address = {Orlando, FL, USA}, abstract = {Tor is a volunteer-operated network of application-layer relays that enables users to communicate privately and anonymously. Unfortunately, Tor often exhibits poor performance due to congestion caused by the unbalanced ratio of clients to available relays, as well as a disproportionately high consumption of network capacity by a small fraction of filesharing users. -This paper argues the very counterintuitive notion that slowing down traffic on Tor will increase the bandwidth capacity of the network and consequently improve the experience of interactive web users. We introduce Tortoise, a system for rate limiting Tor at its ingress points. We demonstrate that Tortoise incurs little penalty for interactive web users, while significantly decreasing the throughput for filesharers. Our techniques provide incentives to filesharers to configure their Tor clients to also relay traffic, which in turn improves the network{\textquoteright}s overall performance. We present large-scale emulation results that indicate that interactive users will achieve a significant speedup if even a small fraction of clients opt to run relays. 
-}, +This paper argues the very counterintuitive notion that slowing down traffic on Tor will increase the bandwidth capacity of the network and consequently improve the experience of interactive web users. We introduce Tortoise, a system for rate limiting Tor at its ingress points. We demonstrate that Tortoise incurs little penalty for interactive web users, while significantly decreasing the throughput for filesharers. Our techniques provide incentives to filesharers to configure their Tor clients to also relay traffic, which in turn improves the network{\textquoteright}s overall performance. We present large-scale emulation results that indicate that interactive users will achieve a significant speedup if even a small fraction of clients opt to run relays}, keywords = {anonymity, performance, Tor}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ACSAC\%2711\%20-\%20Tortoise.pdf}, author = {W. Brad Moore and Chris Wacek and Micah Sherr} @@ -1341,8 +1317,7 @@ This paper argues the very counterintuitive notion that slowing down traffic on month = dec, address = {Orlando, FL, USA}, abstract = {Traffic watermarking is an important element in many network security and privacy applications, such as tracing botnet C\&C communications and deanonymizing peer-to-peer VoIP calls. The state-of-the-art traffic watermarking schemes are usually based on packet timing information and they are notoriously difficult to detect. In this paper, we show for the first time that even the most sophisticated timing-based watermarking schemes (e.g., RAINBOW and SWIRL) are not invisible by proposing a new detection system called BACKLIT. BACKLIT is designed according to the observation that any practical timing-based traffic watermark will cause noticeable alterations in the intrinsic timing features typical of TCP flows. We propose five metrics that are sufficient for detecting four state-of-the-art traffic watermarks for bulk transfer and interactive traffic. 
BACKLIT can be easily deployed in stepping stones and anonymity networks (e.g., Tor), because it does not rely on strong assumptions and can be realized in an active or passive mode. We have conducted extensive experiments to evaluate BACKLIT{\textquoteright}s detection performance using the PlanetLab platform. The results show that BACKLIT can detect watermarked network flows -with high accuracy and few false positives. -}, +with high accuracy and few false positives}, keywords = {BACKLIT, detection system, invisible, network security, packet timing information, privacy, traffic watermark}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ACSAC\%2711\%20-\%20BACKLIT.pdf}, author = {Xiapu Luo and Peng Zhou and Junjie Zhang and Roberto Perdisci and Wenke Lee and Rocky K. C. Chang} @@ -1356,8 +1331,7 @@ with high accuracy and few false positives. organization = {ACM}, address = {Chicago, IL, United States}, abstract = {We introduce Faust, a solution to the {\textquotedblleft}anonymous blacklisting problem:{\textquotedblright} allow an anonymous user to prove that she is authorized to access an online service such that if the user misbehaves, she retains her anonymity but will be unable to -authenticate in future sessions. Faust uses no trusted third parties and is one to two orders of magnitude more efficient than previous schemes without trusted third parties. The key idea behind Faust is to eliminate the explicit blacklist used in all previous approaches, and rely instead on an implicit whitelist, based on blinded authentication tokens. -}, +authenticate in future sessions. Faust uses no trusted third parties and is one to two orders of magnitude more efficient than previous schemes without trusted third parties. 
The key idea behind Faust is to eliminate the explicit blacklist used in all previous approaches, and rely instead on an implicit whitelist, based on blinded authentication tokens}, keywords = {anonymous authentication, anonymous blacklisting, privacy-enhancing revocation}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WPES\%2711\%20-\%20FAUST.pdf}, author = {Peter Lofgren and Nicholas J. Hopper} @@ -1387,9 +1361,7 @@ authenticate in future sessions. Faust uses no trusted third parties and is one abstract = {Anonymous communications networks, such as Tor, help to solve the real and important problem of enabling users to communicate privately over the Internet. However, in doing so, anonymous communications networks introduce an entirely new problem for the service providers{\textemdash}such as websites, IRC networks or mail servers{\textemdash}with which these users interact; in particular, since all anonymous users look alike, there is no way for the service providers to hold individual misbehaving anonymous users accountable for their actions. Recent research efforts have focused on using anonymous blacklisting systems (which are sometimes called anonymous revocation systems) to empower service providers with the ability to revoke access from abusive anonymous users. In contrast to revocable anonymity systems, which enable some trusted third party to deanonymize users, anonymous blacklisting systems provide users with a way to authenticate anonymously with a service provider, while enabling the service provider to revoke access from any users that misbehave, without revealing their identities. In this paper, we introduce the anonymous blacklisting problem and survey the literature on anonymous blacklisting systems, comparing and contrasting the architecture of various existing schemes, and discussing the tradeoffs inherent with each design. 
The literature on anonymous blacklisting systems lacks a unified set of definitions; each scheme operates under different trust assumptions and provides different security and privacy guarantees. Therefore, before we discuss the existing approaches in detail, we first propose a formal definition for anonymous blacklisting systems, and a set of security and privacy properties that these systems should possess. We also
-outline a set of new performance requirements that anonymous blacklisting systems should satisfy to maximize their potential for real-world adoption, and give formal definitions for several optional features already supported by some schemes in the literature.
-
-},
+outline a set of new performance requirements that anonymous blacklisting systems should satisfy to maximize their potential for real-world adoption, and give formal definitions for several optional features already supported by some schemes in the literature},
 keywords = {anonymity, anonymous blacklisting, authentication, privacy enhancing technologies, privacy-enhanced revocation},
 www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Formalizing\%20Anonymous\%20Blacklisting\%20Systems.pdf},
 author = {Ryan Henry and Ian Goldberg}
 }
@@ -1399,7 +1371,7 @@
 booktitle = {SysSec 2011},
 year = {2011},
 address = {Amsterdam, Netherlands},
- abstract = {This paper introduces the current research and future plans of the Free Secure Network Systems Group at the Technische Universit\&auml;t M\&uuml;nchen. In particular, we provide some insight into the development process and architecture of the GNUnet P2P framework and the challenges we are currently working on. },
+ abstract = {This paper introduces the current research and future plans of the Free Secure Network Systems Group at the Technische Universit{\"a}t M{\"u}nchen. 
In particular, we provide some insight into the development process and architecture of the GNUnet P2P framework and the challenges we are currently working on}, keywords = {anonymity, GNUnet, routing}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/syssec2011.pdf}, author = {Christian Grothoff} @@ -1455,8 +1427,7 @@ To better understand the security and performance properties of a popular low la year = {2011}, month = feb, address = {St. Lucia}, - abstract = {In this paper we investigate the impact of missing replay protection as well as missing integrity protection concerning a local attacker in AN.ON. AN.ON is a low latency anonymity network mostly used to anonymize web traffic. We demonstrate that both protection mechanisms are important by presenting two attacks that become feasible as soon as the mechanisms are missing. We mount both attacks on the AN.ON network which neither implements replay protection nor integrity protection yet. -}, + abstract = {In this paper we investigate the impact of missing replay protection as well as missing integrity protection concerning a local attacker in AN.ON. AN.ON is a low latency anonymity network mostly used to anonymize web traffic. We demonstrate that both protection mechanisms are important by presenting two attacks that become feasible as soon as the mechanisms are missing. We mount both attacks on the AN.ON network which neither implements replay protection nor integrity protection yet}, keywords = {AN.ON, anonymity network, integrity protection, replay protection}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2711\%20-\%20Malice\%20versus\%20AN.ON_.pdf}, author = {Benedikt Westermann and Dogan Kesdogan} @@ -1496,8 +1467,7 @@ Another technique we use in our implementation to help address the connectivity which helped when designing the distance vector routing protocol for our system. 
Finally, we present the design of our new secure randomized routing algorithm that does not suffer from the various problems we discovered in previous designs. Goals for the algorithm include providing efficiency and robustness in the presence of malicious participants for an open, fully decentralized network without trusted authorities. We provide a mathematical analysis of the algorithm itself and have created and deployed an implementation of this algorithm in GNUnet. In this thesis we also provide a detailed overview of a distributed -emulation framework capable of running a large number of nodes using our full code base as well as some of the challenges encountered in creating and using such a testing framework. We present extensive experimental results showing that our routing algorithm outperforms the dominant DHT design in target topologies, and performs comparably in other scenarios. -}, +emulation framework capable of running a large number of nodes using our full code base as well as some of the challenges encountered in creating and using such a testing framework. We present extensive experimental results showing that our routing algorithm outperforms the dominant DHT design in target topologies, and performs comparably in other scenarios}, keywords = {distributed hash table, Freenet, GNUnet, NAT, R5N, Tor}, isbn = {3-937201-26-2}, issn = {1868-2642}, @@ -1545,8 +1515,7 @@ emulation framework capable of running a large number of nodes using our full co Traditionally, these schemes have relied on powerful Trusted Third Parties (TTPs) capable of deanonymizing (or linking) users{\textquoteright} connections. Such TTPs are undesirable because users{\textquoteright} anonymity is not guaranteed, and users must trust them to judge {\textquoteleft}misbehavior{\textquoteright} fairly. 
Recent schemes such as Blacklistable Anonymous Credentials (BLAC) and Enhanced Privacy ID (EPID) support {\textquotedblleft}privacy-enhanced revocation{\textquotedblright} {\textemdash} servers can revoke misbehaving users without a TTP{\textquoteright}s involvement, and without learning the revoked users{\textquoteright} identities. In BLAC and EPID, however, the computation required for authentication at the server is linear in the size (L) of the revocation list, which is impractical as the size approaches thousands of entries. We propose PEREA, a new anonymous authentication scheme for which this bottleneck of computation is independent of the size of the revocation list. Instead, the time complexity of authentication is linear in the size of a revocation window $K \ll L$, the number of subsequent authentications before which a user{\textquoteright}s misbehavior must be recognized if the user is to be revoked. We extend PEREA to support more complex revocation policies that take the severity of misbehaviors into account. Users can authenticate anonymously if their naughtiness, i.e., the sum of the severities of their blacklisted misbehaviors, is below a certain naughtiness threshold.
-We call our extension PEREA-Naughtiness. We prove the security of our constructions, and validate their efficiency as compared to BLAC both analytically and quantitatively.
-},
+We call our extension PEREA-Naughtiness. 
We prove the security of our constructi month = aug, address = {San Francisco, CA, USA}, abstract = {Existing anonymous communication systems like Tor do not scale well as they require all users to maintain up-to-date information about all available Tor relays in the system. Current proposals for scaling anonymous communication advocate a peer-to-peer (P2P) approach. While the P2P paradigm scales to millions of nodes, it provides new opportunities to compromise anonymity. In this paper, we step away from the P2P paradigm and advocate a client-server approach to scalable anonymity. We propose PIR-Tor, an architecture for the Tor network in which users obtain information about only a few onion routers using private information retrieval techniques. Obtaining information about only a few onion routers is the key to the scalability of our approach, while the use of private retrieval information techniques helps preserve client anonymity. The security of our architecture depends on the security of PIR schemes which are -well understood and relatively easy to analyze, as opposed to peer-to-peer designs that require analyzing extremely complex and dynamic systems. In particular, we demonstrate that reasonable parameters of our architecture provide equivalent security to that of the Tor network. Moreover, our experimental results show that the overhead of PIR-Tor is manageable even when the Tor network scales by two orders of magnitude. -}, +well understood and relatively easy to analyze, as opposed to peer-to-peer designs that require analyzing extremely complex and dynamic systems. In particular, we demonstrate that reasonable parameters of our architecture provide equivalent security to that of the Tor network. 
Moreover, our experimental results show that the overhead of PIR-Tor is manageable even when the Tor network scales by two orders of magnitude}, keywords = {anonymous communication, peer to peer, PIR-Tor}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/USENIX\%20-\%20PIR-Tor.pdf}, author = {Prateek Mittal and Femi Olumofin and Carmela Troncoso and Borisov, Nikita and Ian Goldberg} @@ -1623,8 +1591,7 @@ service. This thesis provides the necessary background on I2P, gives details on the attack --- including experimental data from measurements against the -actual I2P network --- and discusses possible solutions. -}, +actual I2P network --- and discusses possible solutions}, keywords = {anonymity, attack, denial-of-service, I2P}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/herrmann2011mt.pdf}, author = {Michael Herrmann} @@ -1676,8 +1643,7 @@ private mechanism, will only gain a negligible advantage (up to a privacy parame year = {2011}, month = feb, address = {St. Lucia}, - abstract = {Many people currently use proxies to circumvent government censorship that blocks access to content on the Internet. Unfortunately, the dissemination channels used to distribute proxy server locations are increasingly being monitored to discover and quickly block these proxies. This has given rise to a large number of ad hoc dissemination channels that leverage trust networks to reach legitimate users and at the same time prevent proxy server addresses from falling into the hands of censors. To address this problem in a more principled manner, we present Proximax, a robust system that continuously distributes pools of proxies to a large number of channels. The key research challenge in Proximax is to distribute the proxies among the different channels in a way that maximizes the usage of these proxies while minimizing the risk of having them blocked. 
This is challenging because of two conflicting goals: widely disseminating the location of the proxies to fully utilize their capacity and preventing (or at least delaying) their discovery by censors. We present a practical system that lays out a design and analytical model that balances these factors. -}, + abstract = {Many people currently use proxies to circumvent government censorship that blocks access to content on the Internet. Unfortunately, the dissemination channels used to distribute proxy server locations are increasingly being monitored to discover and quickly block these proxies. This has given rise to a large number of ad hoc dissemination channels that leverage trust networks to reach legitimate users and at the same time prevent proxy server addresses from falling into the hands of censors. To address this problem in a more principled manner, we present Proximax, a robust system that continuously distributes pools of proxies to a large number of channels. The key research challenge in Proximax is to distribute the proxies among the different channels in a way that maximizes the usage of these proxies while minimizing the risk of having them blocked. This is challenging because of two conflicting goals: widely disseminating the location of the proxies to fully utilize their capacity and preventing (or at least delaying) their discovery by censors. 
We present a practical system that lays out a design and analytical model that balances these factors}, keywords = {government censorship, Proximax, proxy}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2711\%20-\%20Proximax.pdf}, author = {Kirill Levchenko and Damon McCoy} @@ -1697,8 +1663,7 @@ private mechanism, will only gain a negligible advantage (up to a privacy parame organization = {IEEE}, address = {Milan, Italy}, abstract = {This paper describes a new secure DHT routing algorithm for open, -decentralized P2P networks operating in a restricted-route environment with malicious participants. We have implemented our routing algorithm and have evaluated its performance under various topologies and in the presence of malicious peers. For small-world topologies, our algorithm provides significantly better performance when compared to existing methods. In more densely connected topologies, our performance is better than or on par with other designs. -}, +decentralized P2P networks operating in a restricted-route environment with malicious participants. We have implemented our routing algorithm and have evaluated its performance under various topologies and in the presence of malicious peers. For small-world topologies, our algorithm provides significantly better performance when compared to existing methods. In more densely connected topologies, our performance is better than or on par with other designs}, keywords = {distributed hash table, GNUnet, R5N, routing}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/nss2011.pdf}, author = {Nathan S Evans and Christian Grothoff} @@ -1738,7 +1703,7 @@ privacy and leakage, due to the graph symmetries induced by the adjacency relati year = {2011}, month = jun, address = {Berlin, Germany}, - abstract = {There{\textquoteright}s a lot of buzz out there about "replacing" Facebook with a privacy-enhanced, decentralized, ideally open source something. 
In this talk we{\textquoteright}ll focus on how much privacy we should plan for (specifically about how we cannot entrust our privacy to modern virtual machine technology) and the often underestimated problem of getting such a monster network to function properly. These issues can be considered together or separately: Even if you{\textquoteright}re not as concerned about privacy as we are, the scalability problem still persists. },
+ abstract = {There{\textquoteright}s a lot of buzz out there about "replacing" Facebook with a privacy-enhanced, decentralized, ideally open source something. In this talk we{\textquoteright}ll focus on how much privacy we should plan for (specifically about how we cannot entrust our privacy to modern virtual machine technology) and the often underestimated problem of getting such a monster network to function properly. These issues can be considered together or separately: Even if you{\textquoteright}re not as concerned about privacy as we are, the scalability problem still persists},
 keywords = {GNUnet, privacy, social networks},
 url = {http://secushare.org/2011-FSW-Scalability-Paranoia},
 www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/2011-FSW-Scalability-Paranoia.pdf},
@@ -1785,8 +1750,7 @@ privacy and leakage, due to the graph symmetries induced by the adjacency relati
 address = {San Diego, CA, USA},
 abstract = {We present a cryptographic framework to achieve access control, privacy of social relations, secrecy of resources, and anonymity of users in social networks. We illustrate our technique on a core API for social networking, which includes methods for establishing social relations and for sharing resources. The cryptographic protocols implementing these methods use pseudonyms to hide user identities, signatures on these pseudonyms to establish social relations, and zero-knowledge proofs of knowledge of such signatures to demonstrate the existence of social relations without sacrificing user anonymity. 
As we do not put any constraints on the underlying social network, our framework is generally applicable and, in particular, constitutes an ideal plug-in for decentralized social networks. -We analyzed the security of our protocols by developing formal definitions of the aforementioned security properties and by verifying them using ProVerif, an automated theorem prover for cryptographic protocols. Finally, we built a prototypical implementation and conducted an experimental evaluation to demonstrate the efficiency and the scalability of our framework. -}, +We analyzed the security of our protocols by developing formal definitions of the aforementioned security properties and by verifying them using ProVerif, an automated theorem prover for cryptographic protocols. Finally, we built a prototypical implementation and conducted an experimental evaluation to demonstrate the efficiency and the scalability of our framework}, keywords = {API, online-social-networks, security}, url = {http://www.lbs.cs.uni-saarland.de/publications/sapi.pdf }, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/NDSS\%2711\%20-\%20Security\%20API\%20for\%20Distributed\%20Social\%20Networks.pdf}, @@ -1827,8 +1791,7 @@ In this paper, we embrace the social aspects of the Web 2.0 by considering a nov abstract = {Anonymity systems such as Tor aim to enable users to communicate in a manner that is untraceable by adversaries that control a small number of machines. To provide efficient service to users, these anonymity systems make full use of forwarding capacity when sending traffic between intermediate relays. In this paper, we show that doing this leaks information about the set of Tor relays in a circuit (path). 
We present attacks that, with high confidence and based solely on throughput information, can (a) reduce the attacker{\textquoteright}s uncertainty about the bottleneck relay of any Tor circuit whose throughput can be observed, (b) exactly identify the guard relay(s) of a Tor user when circuit throughput can be observed over multiple connections, and (c) identify whether two concurrent TCP connections belong to the same Tor user, breaking unlinkability. Our attacks are stealthy, and cannot be readily detected by a user or by Tor relays. We validate our attacks using experiments over the live Tor network. We find that the attacker can substantially reduce the entropy of a bottleneck relay distribution of a Tor circuit whose throughput can be observed{\textemdash}the entropy gets reduced by a factor of 2 in the median case. Such information leaks from a single Tor circuit can be combined over multiple connections to exactly identify a user{\textquoteright}s guard relay(s). Finally, we are also able to link two connections from the same initiator with a crossover error rate of less -than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fewer resources than previous attacks on Tor. -}, +than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fewer resources than previous attacks on Tor}, keywords = {anonymity, attacks, throughput}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CCS\%2711\%20-\%20Throughput-fingerprinting.pdf}, author = {Prateek Mittal and Ahmed Khurshid and Joshua Juen and Matthew Caesar and Borisov, Nikita} @@ -1841,8 +1804,7 @@ than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fe address = {San Diego, CA, USA}, abstract = {Flow watermarks are active traffic analysis techniques that help establish a causal connection between two network flows by content-independent manipulations, e.g., altering packet timings. 
Watermarks provide a much more scalable approach for flow correlation than passive traffic analysis. Previous designs of scalable watermarks, however, were subject to multi-flow attacks. They also introduced delays too large to be used in most environments. We design SWIRL, a Scalable Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first watermark that is practical to use for large-scale traffic analysis. SWIRL uses a flow-dependent approach to resist multi-flow -attacks, marking each flow with a different pattern. SWIRL is robust to packet losses and network jitter, yet it introduces only small delays that are invisible to both benign users and determined adversaries. We analyze the performance of SWIRL both analytically and on the PlanetLab testbed, demonstrating very low error rates. We consider applications of SWIRL to stepping stone detection and linking anonymous communication. We also propose a novel application of watermarks to defend against congestion attacks on Tor. -}, +attacks, marking each flow with a different pattern. SWIRL is robust to packet losses and network jitter, yet it introduces only small delays that are invisible to both benign users and determined adversaries. We analyze the performance of SWIRL both analytically and on the PlanetLab testbed, demonstrating very low error rates. We consider applications of SWIRL to stepping stone detection and linking anonymous communication. We also propose a novel application of watermarks to defend against congestion attacks on Tor}, keywords = {anonymity, SWIRL, traffic analysis, watermarking}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/NDSS11-2.pdf}, author = {Amir Houmansadr and Borisov, Nikita} @@ -1854,8 +1816,7 @@ attacks, marking each flow with a different pattern. SWIRL is robust to packet l month = aug, address = {San Francisco, CA, USA}, abstract = {In this paper, we present Telex, a new approach to resisting state-level Internet censorship. 
Rather than attempting to win the cat-and-mouse game of finding open proxies, we leverage censors{\textquoteright} unwillingness to completely block day-to-day Internet access. In effect, Telex converts innocuous, unblocked websites into proxies, without their explicit collaboration. We envision that friendly ISPs would deploy Telex stations on paths between censors{\textquoteright} networks and popular, uncensored Internet destinations. Telex stations would monitor seemingly innocuous flows for a special {\textquotedblleft}tag{\textquotedblright} and transparently divert them to a forbidden website or service instead. We propose a new cryptographic scheme based on elliptic curves for tagging TLS handshakes such that the tag is visible to a Telex -station but not to a censor. In addition, we use our tagging scheme to build a protocol that allows clients to connect to Telex stations while resisting both passive and active attacks. We also present a proof-of-concept implementation that demonstrates the feasibility of our system. -}, +station but not to a censor. In addition, we use our tagging scheme to build a protocol that allows clients to connect to Telex stations while resisting both passive and active attacks. We also present a proof-of-concept implementation that demonstrates the feasibility of our system}, keywords = {anticensorship, network infrastructure state-level censorship, proxy, telex}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Telex\%3A\%20Anticensorship\%20in\%20the\%20Network\%20Infrastructure.pdf}, author = {Eric Wustrow and Scott Wolchok and Ian Goldberg and J. Alex Halderman} @@ -1868,8 +1829,7 @@ station but not to a censor. 
In addition, we use our tagging scheme to build a p publisher = {ACM}, organization = {ACM}, address = {Chicago, IL, United States}, - abstract = {We introduce a novel model of routing security that incorporates the ordinarily overlooked variations in trust that users have for different parts of the network. We focus on anonymous communication, and in particular onion routing, although we expect the approach to apply more broadly. This paper provides two main contributions. First, we present a novel model to consider the various security concerns for route selection in anonymity networks when users vary their trust over parts of the network. Second, to show the usefulness of our model, we present as an example a new algorithm to select paths in onion routing. We analyze its effectiveness against deanonymization and other information leaks, and particularly how it fares in our model versus existing algorithms, which do not consider trust. In contrast to those, we find that our trust-based routing strategy can protect anonymity against an adversary capable of attacking a significant fraction of the network. -}, + abstract = {We introduce a novel model of routing security that incorporates the ordinarily overlooked variations in trust that users have for different parts of the network. We focus on anonymous communication, and in particular onion routing, although we expect the approach to apply more broadly. This paper provides two main contributions. First, we present a novel model to consider the various security concerns for route selection in anonymity networks when users vary their trust over parts of the network. Second, to show the usefulness of our model, we present as an example a new algorithm to select paths in onion routing. We analyze its effectiveness against deanonymization and other information leaks, and particularly how it fares in our model versus existing algorithms, which do not consider trust. 
In contrast to those, we find that our trust-based routing strategy can protect anonymity against an adversary capable of attacking a significant fraction of the network}, keywords = {anonymous communication, onion routing, privacy, trust}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CCS\%2711\%20-\%20Trust-based\%20Anonymous\%20Communication1.pdf}, author = {Aaron Johnson and Paul Syverson and Roger Dingledine and Nick Mathewson} @@ -1903,8 +1863,7 @@ station but not to a censor. In addition, we use our tagging scheme to build a p abstract = {Low-latency anonymization networks such as Tor and JAP claim to hide the recipient and the content of communications from a local observer, i.e., an entity that can eavesdrop the traffic between the user and the first anonymization node. Especially users in totalitarian regimes strongly depend on such networks to freely communicate. For these people, anonymity is particularly important and an analysis of the anonymization methods against various attacks is necessary to ensure adequate protection. In this paper we show that anonymity in Tor and JAP is not as strong as expected so far and cannot resist website fingerprinting attacks under certain circumstances. We first define features for website fingerprinting solely based on volume, time, and direction of the traffic. As a result, the subsequent classification becomes much easier. We apply support vector machines with the introduced features. We are able to improve recognition results of existing works on a given state-of-the-art dataset in Tor from 3\% to 55\% and in JAP from 20\% to 80\%. The datasets assume a closed-world with 775 websites only. In a next step, we transfer our findings to a more complex and realistic open-world scenario, i.e., recognition of several websites in a set of thousands of random unknown websites. To the best of our knowledge, this work is the first successful attack in the open-world scenario. 
We achieve a surprisingly high true positive rate of up to 73\% for a false positive rate of 0.05\%. Finally, we show preliminary results of a proof-of-concept implementation that applies camouflage as a countermeasure to hamper the fingerprinting attack. For -JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from 55\% to about 3\%. -}, +JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from 55\% to about 3\%}, keywords = {anonymous communication, pattern recognition, privacy, traffic analysis, website fingerprinting}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WPES\%2711\%20-\%20Fingerprinting.pdf}, author = {Andriy Panchenko and Lukas Niessen and Andreas Zinnen and Thomas Engel} @@ -2144,8 +2103,7 @@ To evaluate the platform{\textquoteright}s suitability for application developme address = {Atlanta, Georgia, USA}, abstract = {We present a new solution to protect the widely deployed KAD DHT against localized attacks which can take control over DHT entries. We show through measurements that the IDs distribution of the best peers found after a lookup process follows a geometric distribution. We then use this result to detect DHT attacks by comparing real peers{\textquoteright} ID distributions to the theoretical one thanks to the Kullback-Leibler divergence. When an attack is detected, we propose countermeasures that progressively remove suspicious peers from the list of possible contacts to provide a safe DHT access. Evaluations show that our -method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead. -}, +method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. 
Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead}, keywords = {attack detection, attack mitigation, distributed hash table, IDs distribution, KAD, Sybil attack}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf}, author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier} @@ -2758,7 +2716,7 @@ This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynam publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Rio de Janeiro, Brazil}, - abstract = {It is well-known that the overall efficiency of a distributed system can suffer if the participating entities seek to maximize their individual performance. Consequently, mechanisms have been designed that force the participants to behave more cooperatively. Most of these game-theoretic solutions rely on payments between participants. Unfortunately, such payments are often cumbersome to implement in practice, especially in dynamic networks and where transaction costs are high. In this paper, we investigate the potential of mechanisms which work without payments. We consider the problem of throughput maximization in multi-channel environments and shed light onto the throughput increase that can be achieved with and without payments. We introduce and analyze two different concepts: the worst-case leverage where we assume that players end up in the worst rational strategy profile, and the average-case leverage where player select a random non-dominated strategy. Our theoretical insights are complemented by simulations. }, + abstract = {It is well-known that the overall efficiency of a distributed system can suffer if the participating entities seek to maximize their individual performance. Consequently, mechanisms have been designed that force the participants to behave more cooperatively. 
Most of these game-theoretic solutions rely on payments between participants. Unfortunately, such payments are often cumbersome to implement in practice, especially in dynamic networks and where transaction costs are high. In this paper, we investigate the potential of mechanisms which work without payments. We consider the problem of throughput maximization in multi-channel environments and shed light onto the throughput increase that can be achieved with and without payments. We introduce and analyze two different concepts: the worst-case leverage where we assume that players end up in the worst rational strategy profile, and the average-case leverage where player select a random non-dominated strategy. Our theoretical insights are complemented by simulations}, keywords = {distributed systems, game-theoretic, individual performance, mechanism design, payment, throughtput maximization}, isbn = {978-1-4244-3512-8 }, doi = {http://dx.doi.org/10.1109/INFCOM.2009.5062008}, @@ -2796,8 +2754,7 @@ This thesis investigates the effectiveness of Monte-Carlo Tree Search when appli Given the results of the experiments, one can conclude that Monte-Carlo Tree Search gives a slight performance increase over standard Monte-Carlo search. In addition, the most effective improvements appeared to be the application of -pseudo-random simulations and limiting simulation lengths, while other techniques have been shown to be less effective or even ineffective. 
Overall, when applying the best performing techniques, an AI with advanced playing strength has been created, such that further research is likely to push this performance to a strength of expert level}, keywords = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search, search techniques}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Thesis\%20-\%20F.Schadd.pdf}, author = {Frederik Christiaan Schadd} @@ -2924,8 +2881,7 @@ In this paper we establish the optimal trade-off between the round complexity an We show that the attack from their paper is no longer practical on today{\textquoteright}s 1500-relay heavily loaded Tor network. The attack doesn{\textquoteright}t scale because a) the attacker needs a tremendous amount of bandwidth to measure enough relays during the attack window, and b) there are too many false positives now that many other users are adding congestion at the same time as the attacks. -We then strengthen the original congestion attack by combining it with a novel bandwidth amplification attack based on a flaw in the Tor design that lets us build long circuits that loop back on themselves. We show that this new combination attack is practical and effective by demonstrating a working attack on today{\textquoteright}s deployed Tor network. By coming up with a model to better understand Tor{\textquoteright}s routing behavior under congestion, we further provide a statistical analysis characterizing how effective our attack is in each case. -}, +We then strengthen the original congestion attack by combining it with a novel bandwidth amplification attack based on a flaw in the Tor design that lets us build long circuits that loop back on themselves. We show that this new combination attack is practical and effective by demonstrating a working attack on today{\textquoteright}s deployed Tor network. 
By coming up with a model to better understand Tor{\textquoteright}s routing behavior under congestion, we further provide a statistical analysis characterizing how effective our attack is in each case}, keywords = {anonymity, attack, denial-of-service, installation, Tor}, url = {http://grothoff.org/christian/tor.pdf}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/tor.pdf}, @@ -3016,7 +2972,7 @@ We then strengthen the original congestion attack by combining it with a novel b volume = {410}, year = {2009}, pages = {453{\textendash}466}, - abstract = {We consider the problem of designing an efficient and robust distributed random number generator for peer-to-peer systems that is easy to implement and works even if all communication channels are public. A robust random number generator is crucial for avoiding adversarial join-leave attacks on peer-to-peer overlay networks. We show that our new generator together with a light-weight rule recently proposed in [B. Awerbuch, C. Scheideler, Towards a scalable and robust DHT, in: Proc. of the 18th ACM Symp. on Parallel Algorithms and Architectures, SPAA, 2006. See also http://www14.in.tum.de/personen/scheideler] for keeping peers well distributed can keep various structured overlay networks in a robust state even under a constant fraction of adversarial peers. }, + abstract = {We consider the problem of designing an efficient and robust distributed random number generator for peer-to-peer systems that is easy to implement and works even if all communication channels are public. A robust random number generator is crucial for avoiding adversarial join-leave attacks on peer-to-peer overlay networks. We show that our new generator together with a light-weight rule recently proposed in [B. Awerbuch, C. Scheideler, Towards a scalable and robust DHT, in: Proc. of the 18th ACM Symp. on Parallel Algorithms and Architectures, SPAA, 2006. 
See also http://www14.in.tum.de/personen/scheideler] for keeping peers well distributed can keep various structured overlay networks in a robust state even under a constant fraction of adversarial peers}, keywords = {Join-leave attacks, Peer-to-peer systems, Random number generation}, issn = {0304-3975}, doi = {10.1016/j.tcs.2008.10.003}, @@ -3034,8 +2990,7 @@ We then strengthen the original congestion attack by combining it with a novel b address = {New York, NY, USA}, abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive tool for all kinds of applications. More advanced applications also need end-to-end routing, which goes beyond the simple data dissemination and collection mechanisms of early WSNs. The special properties of WSNs -- scarce memory, CPU, and energy resources -- make this a challenge. The Dynamic Address Routing protocol (DART) could be a good candidate for WSN routing, if it were not so prone to link outages. -In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protocol for large WSNs. It combines ideas from landmark routing, flooding, and dynamic address routing. SLF is robust against link and node outages, requires only little routing state, and generates low maintenance traffic overhead. -}, +In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protocol for large WSNs. It combines ideas from landmark routing, flooding, and dynamic address routing. SLF is robust against link and node outages, requires only little routing state, and generates low maintenance traffic overhead}, keywords = {wireless sensor network}, isbn = {978-1-60558-751-6}, doi = {10.1145/1658997.1658999}, @@ -3052,8 +3007,7 @@ In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protoc organization = {ACM New York, NY, USA}, abstract = {We introduce Torsk, a structured peer-to-peer low-latency anonymity protocol. 
Torsk is designed as an interoperable replacement for the relay selection and directory service of the popular Tor anonymity network, that decreases the bandwidth cost of relay selection and maintenance from quadratic to quasilinear while introducing no new attacks on the anonymity provided by Tor, and no additional delay to connections made via Tor. The resulting bandwidth savings make a modest-sized Torsk network significantly cheaper to operate, and allows low-bandwidth clients to join the network. -Unlike previous proposals for P2P anonymity schemes, Torsk does not require all users to relay traffic for others. Torsk utilizes a combination of two P2P lookup mechanisms with complementary strengths in order to avoid attacks on the confidentiality and integrity of lookups. We show by analysis that previously known attacks on P2P anonymity schemes do not apply to Torsk, and report on experiments conducted with a 336-node wide-area deployment of Torsk, demonstrating its efficiency and feasibility. -}, +Unlike previous proposals for P2P anonymity schemes, Torsk does not require all users to relay traffic for others. Torsk utilizes a combination of two P2P lookup mechanisms with complementary strengths in order to avoid attacks on the confidentiality and integrity of lookups. We show by analysis that previously known attacks on P2P anonymity schemes do not apply to Torsk, and report on experiments conducted with a 336-node wide-area deployment of Torsk, demonstrating its efficiency and feasibility}, keywords = {P2P}, isbn = {978-1-60558-894-0}, doi = {10.1145/1653662.1653733}, @@ -3113,8 +3067,7 @@ formance at a slight anonymity cost, while at the same time protecting against selective DoS attacks. We show that our system has manageable overhead and can handle moderate churn, making it an attractive new design for P2P anony- -mous communication. 
-}, +mous communication}, keywords = {anonymity, peer-to-peer, random walks}, isbn = {978-1-60558-894-0}, doi = {10.1145/1653662.1653683}, @@ -3281,8 +3234,7 @@ techniques, we show how to optimally modify packets in real-time to reduce the a volume = {Volume 5918/2009}, year = {2009}, pages = {174-184}, - abstract = {Network Coordinates are a basic building block for most peer-to-peer applications nowadays. They optimize the peer selection process by allowing the nodes to preferably attach to peers to whom they then experience a low round trip time. Albeit there has been substantial research effort in this topic over the last years, the optimization of the various network coordinate algorithms has not been pursued systematically yet. Analyzing the well-known Vivaldi algorithm and its proposed optimizations with several sets of extensive Internet traffic traces, we found that in face of current Internet data most of the parameters that have been recommended in the original papers are a magnitude too high. Based on this insight, we recommend modified parameters that improve the algorithms{\textquoteright} performance significantly. -}, + abstract = {Network Coordinates are a basic building block for most peer-to-peer applications nowadays. They optimize the peer selection process by allowing the nodes to preferably attach to peers to whom they then experience a low round trip time. Albeit there has been substantial research effort in this topic over the last years, the optimization of the various network coordinate algorithms has not been pursued systematically yet. Analyzing the well-known Vivaldi algorithm and its proposed optimizations with several sets of extensive Internet traffic traces, we found that in face of current Internet data most of the parameters that have been recommended in the original papers are a magnitude too high. 
Based on this insight, we recommend modified parameters that improve the algorithms{\textquoteright} performance significantly}, isbn = {978-3-642-10864-8}, issn = {0302-9743}, doi = {10.1007/978-3-642-10865-5}, @@ -3371,8 +3323,7 @@ We present a novel method that applies common text mining techniques to the norm organization = {ACM}, abstract = {We design and analyze the first practical anonymous payment mechanisms for network services. We start by reporting on our experience with the implementation of a routing micropayment solution for Tor. We then propose micropayment protocols of increasingly complex requirements for networked services, such as P2P or cloud-hosted services. -The solutions are efficient, with bandwidth and latency overheads of under 4\% and 0.9 ms respectively (in ORPay for Tor), provide full anonymity (both for payers and payees), and support thousands of transactions per second. -}, +The solutions are efficient, with bandwidth and latency overheads of under 4\% and 0.9 ms respectively (in ORPay for Tor), provide full anonymity (both for payers and payees), and support thousands of transactions per second}, keywords = {anonymity, onion routing, payment, privacy}, isbn = {978-1-60558-783-7 }, doi = {10.1145/1655188.1655195}, @@ -3502,8 +3453,7 @@ We use the simulator to compare representative protocols under identical conditi organization = {IEEE}, address = {Turku, Finland}, abstract = {In this paper, we present the first heuristic for fully distributed bootstrapping of peer-to-peer networks. Our heuristic generates a stream of promising IP addresses to be probed as entry points. This stream is generated using statistical profiles using the IP ranges of start-of-authorities (SOAs) in the domain name system (DNS). 
We -present experimental results demonstrating that with this approach it is efficient and practical to bootstrap Gnutella-sized peer-to-peer networks --- without the need for centralized services or the public exposure of end-user{\textquoteright}s private IP addresses. -}, +present experimental results demonstrating that with this approach it is efficient and practical to bootstrap Gnutella-sized peer-to-peer networks --- without the need for centralized services or the public exposure of end-user{\textquoteright}s private IP addresses}, keywords = {bootstrapping, DNS, installation, P2P}, url = {http://grothoff.org/christian/bootstrap.pdf}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/bootstrap.pdf}, @@ -3535,8 +3485,7 @@ present experimental results demonstrating that with this approach it is efficie publisher = {Springer}, organization = {Springer}, address = {Leuven, Belgium}, - abstract = {Users building routes through an anonymization network must discover the nodes comprising the network. Yet, it is potentially costly, or even infeasible, for everyone to know the entire network. We introduce a novel attack, the route bridging attack, which makes use of what route creators do not know of the network. We also present new discussion and results concerning route fingerprinting attacks, which make use of what route creators do know of the network. We prove analytic bounds for both route fingerprinting and route bridging and describe the impact of these attacks on published anonymity-network designs. We also discuss implications for network scaling and client-server vs. peer-to-peer systems. -}, + abstract = {Users building routes through an anonymization network must discover the nodes comprising the network. Yet, it is potentially costly, or even infeasible, for everyone to know the entire network. We introduce a novel attack, the route bridging attack, which makes use of what route creators do not know of the network. 
We also present new discussion and results concerning route fingerprinting attacks, which make use of what route creators do know of the network. We prove analytic bounds for both route fingerprinting and route bridging and describe the impact of these attacks on published anonymity-network designs. We also discuss implications for network scaling and client-server vs. peer-to-peer systems}, keywords = {anonymity, P2P, route bridging attack}, isbn = {978-3-540-70629-8}, doi = {10.1007/978-3-540-70630-4}, @@ -3554,8 +3503,7 @@ present experimental results demonstrating that with this approach it is efficie pages = {267{\textendash}280}, publisher = {IEEE Press}, address = {Piscataway, NJ, USA}, - abstract = {In recent years, peer-to-peer (P2P) file-sharing systems have evolved to accommodate growing numbers of participating peers. In particular, new features have changed the properties of the unstructured overlay topologies formed by these peers. Little is known about the characteristics of these topologies and their dynamics in modern file-sharing applications, despite their importance. This paper presents a detailed characterization of P2P overlay topologies and their dynamics, focusing on the modern Gnutella network. We present Cruiser, a fast and accurate P2P crawler, which can capture a complete snapshot of the Gnutella network of more than one million peers in just a few minutes, and show how inaccuracy in snapshots can lead to erroneous conclusions--such as a power-law degree distribution. Leveraging recent overlay snapshots captured with Cruiser, we characterize the graph-related properties of individual overlay snapshots and overlay dynamics across slices of back-to-back snapshots. Our results reveal that while the Gnutella network has dramatically grown and changed in many ways, it still exhibits the clustering and short path lengths of a small world network. 
Furthermore, its overlay topology is highly resilient to random peer departure and even systematic attacks. More interestingly, overlay dynamics lead to an "onion-like" biased connectivity among peers where each peer is more likely connected to peers with higher uptime. Therefore, long-lived peers form a stable core that ensures reachability among peers despite overlay dynamics. -}, + abstract = {In recent years, peer-to-peer (P2P) file-sharing systems have evolved to accommodate growing numbers of participating peers. In particular, new features have changed the properties of the unstructured overlay topologies formed by these peers. Little is known about the characteristics of these topologies and their dynamics in modern file-sharing applications, despite their importance. This paper presents a detailed characterization of P2P overlay topologies and their dynamics, focusing on the modern Gnutella network. We present Cruiser, a fast and accurate P2P crawler, which can capture a complete snapshot of the Gnutella network of more than one million peers in just a few minutes, and show how inaccuracy in snapshots can lead to erroneous conclusions--such as a power-law degree distribution. Leveraging recent overlay snapshots captured with Cruiser, we characterize the graph-related properties of individual overlay snapshots and overlay dynamics across slices of back-to-back snapshots. Our results reveal that while the Gnutella network has dramatically grown and changed in many ways, it still exhibits the clustering and short path lengths of a small world network. Furthermore, its overlay topology is highly resilient to random peer departure and even systematic attacks. More interestingly, overlay dynamics lead to an "onion-like" biased connectivity among peers where each peer is more likely connected to peers with higher uptime. 
Therefore, long-lived peers form a stable core that ensures reachability among peers despite overlay dynamics}, keywords = {file-sharing, P2P}, issn = {1063-6692}, doi = {10.1109/TNET.2007.900406}, @@ -3589,8 +3537,7 @@ To evaluate our novel attack, we used a real-world anonymizing system, TOR. We s publisher = {IEEE Computer Society Press}, organization = {IEEE Computer Society Press}, address = {Gino-wan, Okinawa, Japan}, - abstract = {The paper introduces a peer-to-peer system called P2PRIV (peer-to-peer direct and anonymous distribution overlay). Basic novel features of P2PRIV are: (i) a peer-to-peer parallel content exchange architecture, and (ii) separation of the anonymization process from the transport function. These features allow a considerable saving of service time while preserving high degree of anonymity. In the paper we evaluate anonymity measures of P2PRIV (using a normalized entropy measurement model) as well as its traffic measures (including service time and network dynamics), and compare anonymity and traffic performance of P2PRIV with a well known system called CROWDS. -}, + abstract = {The paper introduces a peer-to-peer system called P2PRIV (peer-to-peer direct and anonymous distribution overlay). Basic novel features of P2PRIV are: (i) a peer-to-peer parallel content exchange architecture, and (ii) separation of the anonymization process from the transport function. These features allow a considerable saving of service time while preserving high degree of anonymity. 
In the paper we evaluate anonymity measures of P2PRIV (using a normalized entropy measurement model) as well as its traffic measures (including service time and network dynamics), and compare anonymity and traffic performance of P2PRIV with a well known system called CROWDS}, keywords = {communication system security, privacy}, isbn = {978-0-7695-3095-6}, doi = {10.1109/AINA.2008.117}, @@ -3946,8 +3893,7 @@ To deal with such networks researchers have suggested to use flooding-based rout publisher = {USENIX Association Berkeley, CA, USA}, organization = {USENIX Association Berkeley, CA, USA}, address = {San Jose, CA, US}, - abstract = {The Tor anonymisation network allows services, such as web servers, to be operated under a pseudonym. In previous work Murdoch described a novel attack to reveal such hidden services by correlating clock skew changes with times of increased load, and hence temperature. Clock skew measurement suffers from two main sources of noise: network jitter and timestamp quantisation error. Depending on the target{\textquoteright}s clock frequency the quantisation noise can be orders of magnitude larger than the noise caused by typical network jitter. Quantisation noise limits the previous attacks to situations where a high frequency clock is available. It has been hypothesised that by synchronising measurements to the clock ticks, quantisation noise can be reduced. We show how such synchronisation can be achieved and maintained, despite network jitter. Our experiments show that synchronised sampling significantly reduces the quantisation error and the remaining noise only depends on the network jitter (but not clock frequency). Our improved skew estimates are up to two magnitudes more accurate for low-resolution timestamps and up to one magnitude more accurate for high-resolution timestamps, when compared to previous random sampling techniques. 
The improved accuracy not only allows previous attacks to be executed faster and with less network traffic but also opens the door to previously infeasible attacks on low-resolution clocks, including measuring skew of a HTTP server over the anonymous channel. -}, + abstract = {The Tor anonymisation network allows services, such as web servers, to be operated under a pseudonym. In previous work Murdoch described a novel attack to reveal such hidden services by correlating clock skew changes with times of increased load, and hence temperature. Clock skew measurement suffers from two main sources of noise: network jitter and timestamp quantisation error. Depending on the target{\textquoteright}s clock frequency the quantisation noise can be orders of magnitude larger than the noise caused by typical network jitter. Quantisation noise limits the previous attacks to situations where a high frequency clock is available. It has been hypothesised that by synchronising measurements to the clock ticks, quantisation noise can be reduced. We show how such synchronisation can be achieved and maintained, despite network jitter. Our experiments show that synchronised sampling significantly reduces the quantisation error and the remaining noise only depends on the network jitter (but not clock frequency). Our improved skew estimates are up to two magnitudes more accurate for low-resolution timestamps and up to one magnitude more accurate for high-resolution timestamps, when compared to previous random sampling techniques. 
The improved accuracy not only allows previous attacks to be executed faster and with less network traffic but also opens the door to previously infeasible attacks on low-resolution clocks, including measuring skew of a HTTP server over the anonymous channel}, keywords = {anonymity, pseudonym, Tor}, url = {http://portal.acm.org/citation.cfm?id=1496726}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/improved-clockskew.pdf}, @@ -4201,7 +4147,7 @@ In this paper, we demonstrate a payment scheme that can be used to compensate no publisher = {Springer}, organization = {Springer}, address = {Leuven, Belgium}, - abstract = {Traffic analysis is the best known approach to uncover relationships amongst users of anonymous communication systems, such as mix networks. Surprisingly, all previously published techniques require very specific user behavior to break the anonymity provided by mixes. At the same time, it is also well known that none of the considered user models reflects realistic behavior which casts some doubt on previous work with respect to real-life scenarios. We first present a user behavior model that, to the best of our knowledge, is the least restrictive scheme considered so far. Second, we develop the Perfect Matching Disclosure Attack, an efficient attack based on graph theory that operates without any assumption on user behavior. The attack is highly effective when de-anonymizing mixing rounds because it considers all users in a round at once, rather than single users iteratively. Furthermore, the extracted sender-receiver relationships can be used to enhance user profile estimations. We extensively study the effectiveness and efficiency of our attack and previous work when de-anonymizing users communicating through a threshold mix. Empirical results show the advantage of our proposal. 
We also show how the attack can be refined and adapted to different scenarios including pool mixes, and how precision can be traded in for speed, which might be desirable in certain cases. }, + abstract = {Traffic analysis is the best known approach to uncover relationships amongst users of anonymous communication systems, such as mix networks. Surprisingly, all previously published techniques require very specific user behavior to break the anonymity provided by mixes. At the same time, it is also well known that none of the considered user models reflects realistic behavior which casts some doubt on previous work with respect to real-life scenarios. We first present a user behavior model that, to the best of our knowledge, is the least restrictive scheme considered so far. Second, we develop the Perfect Matching Disclosure Attack, an efficient attack based on graph theory that operates without any assumption on user behavior. The attack is highly effective when de-anonymizing mixing rounds because it considers all users in a round at once, rather than single users iteratively. Furthermore, the extracted sender-receiver relationships can be used to enhance user profile estimations. We extensively study the effectiveness and efficiency of our attack and previous work when de-anonymizing users communicating through a threshold mix. Empirical results show the advantage of our proposal. 
We also show how the attack can be refined and adapted to different scenarios including pool mixes, and how precision can be traded in for speed, which might be desirable in certain cases}, keywords = {mix, traffic analysis}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.147.4953}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/troncoso-pet2008.pdf}, @@ -4251,8 +4197,7 @@ In this paper, we demonstrate a payment scheme that can be used to compensate no institution = {Swiss Federal Institute of Technology (EPFL)}, type = {Tech report}, address = {Lausanne, Switzerland}, - abstract = {Abstract. In Distributed Constraint Satisfaction Problems, agents often desire to find a solution while revealing as little as possible about their variables and constraints. So far, most algorithms for DisCSP do not guarantee privacy of this information. This paper describes some simple obfuscation techniques that can be used with DisCSP algorithms such as DPOP, and provide sensible privacy guarantees based on the distributed solving process without sacrificing its efficiency. -}, + abstract = {Abstract. In Distributed Constraint Satisfaction Problems, agents often desire to find a solution while revealing as little as possible about their variables and constraints. So far, most algorithms for DisCSP do not guarantee privacy of this information. 
This paper describes some simple obfuscation techniques that can be used with DisCSP algorithms such as DPOP, and provide sensible privacy guarantees based on the distributed solving process without sacrificing its efficiency}, keywords = {algorithms, DisCSP algorithm, distributed constraint satisfaction, optimization, privacy, SMC}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Tech\%20Report\%20-\%20Privacy\%20guarantees\%20through\%20DCS.pdf}, author = {Boi Faltings and Thomas Leaute and Adrian Petcu} @@ -4285,7 +4230,7 @@ In this paper, we demonstrate a payment scheme that can be used to compensate no year = {2008}, type = {publication}, address = {St. Petersburg, U.S}, - abstract = {Key based routing (KBR) enables peer-to-peer applications to create and use distributed services. KBR is more flexible than distributed hash tables (DHT). However, the broader the application area, the more important become performance issues for a KBR service. In this paper, we present a novel approach to provide a generic KBR service. Its key idea is to use a predictable address assignment scheme. This scheme allows peers to calculate the overlay address of the node that is responsible for a given key and application ID. A public DHT service such as OpenDHT can then resolve this overlay address to the transport address of the respective peer. We compare our solution to alternative proposals such as ReDiR and Diminished Chord. We conclude that our solution has a better worst case complexity for some important KBR operations and the required state. In particular, unlike ReDiR, our solution can guarantee a low latency for KBR route operations. }, + abstract = {Key based routing (KBR) enables peer-to-peer applications to create and use distributed services. KBR is more flexible than distributed hash tables (DHT). However, the broader the application area, the more important become performance issues for a KBR service. 
In this paper, we present a novel approach to provide a generic KBR service. Its key idea is to use a predictable address assignment scheme. This scheme allows peers to calculate the overlay address of the node that is responsible for a given key and application ID. A public DHT service such as OpenDHT can then resolve this overlay address to the transport address of the respective peer. We compare our solution to alternative proposals such as ReDiR and Diminished Chord. We conclude that our solution has a better worst case complexity for some important KBR operations and the required state. In particular, unlike ReDiR, our solution can guarantee a low latency for KBR route operations}, keywords = {distributed hash table, P2P}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/di08iptps.pdf}, @@ -4334,8 +4279,7 @@ In this paper, we demonstrate a payment scheme that can be used to compensate no publisher = {Springer}, organization = {Springer}, address = {Leuven, Belgium}, - abstract = {We present a reputation scheme for a pseudonymous peer-to-peer (P2P) system in an anonymous network. Misbehavior is one of the biggest problems in pseudonymous P2P systems, where there is little incentive for proper behavior. In our scheme, using ecash for reputation points, the reputation of each user is closely related to his real identity rather than to his current pseudonym. Thus, our scheme allows an honest user to switch to a new pseudonym keeping his good reputation, while hindering a malicious user from erasing his trail of evil deeds with a new pseudonym. -}, + abstract = {We present a reputation scheme for a pseudonymous peer-to-peer (P2P) system in an anonymous network. Misbehavior is one of the biggest problems in pseudonymous P2P systems, where there is little incentive for proper behavior. 
In our scheme, using ecash for reputation points, the reputation of each user is closely related to his real identity rather than to his current pseudonym. Thus, our scheme allows an honest user to switch to a new pseudonym keeping his good reputation, while hindering a malicious user from erasing his trail of evil deeds with a new pseudonym}, keywords = {anonymity, P2P, pseudonym}, isbn = {978-3-540-70629-8}, doi = {10.1007/978-3-540-70630-4_13}, @@ -4520,7 +4464,7 @@ To sample the results, we show that web traffic makes up the majority of the con month = feb, publisher = {Internet Society}, organization = {Internet Society}, - abstract = {The Tor anonymous communication network uses selfreported bandwidth values to select routers for building tunnels. Since tunnels are allocated in proportion to this bandwidth, this allows a malicious router operator to attract tunnels for compromise. Since the metric used is insensitive to relative load, it does not adequately respond to changing conditions and hence produces unreliable performance, driving many users away. We propose an opportunistic bandwidth measurement algorithm to replace selfreported values and address both of these problems. We also propose a mechanisms to let users tune Tor performance to achieve higher performance or higher anonymity. Our mechanism effectively blends the traffic from users of different preferences, making partitioning attacks difficult. We implemented the opportunistic measurement and tunable performance extensions and examined their performance both analytically and in the real Tor network. Our results show that users can get dramatic increases in either performance or anonymity with little to no sacrifice in the other metric, or a more modest improvement in both. Our mechanisms are also invulnerable to the previously published low-resource attacks on Tor. }, + abstract = {The Tor anonymous communication network uses selfreported bandwidth values to select routers for building tunnels. 
Since tunnels are allocated in proportion to this bandwidth, this allows a malicious router operator to attract tunnels for compromise. Since the metric used is insensitive to relative load, it does not adequately respond to changing conditions and hence produces unreliable performance, driving many users away. We propose an opportunistic bandwidth measurement algorithm to replace selfreported values and address both of these problems. We also propose a mechanisms to let users tune Tor performance to achieve higher performance or higher anonymity. Our mechanism effectively blends the traffic from users of different preferences, making partitioning attacks difficult. We implemented the opportunistic measurement and tunable performance extensions and examined their performance both analytically and in the real Tor network. Our results show that users can get dramatic increases in either performance or anonymity with little to no sacrifice in the other metric, or a more modest improvement in both. Our mechanisms are also invulnerable to the previously published low-resource attacks on Tor}, keywords = {anonymity, Tor}, doi = {10.1109/NCM.2009.205}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.7368}, @@ -4617,8 +4561,7 @@ To sample the results, we show that web traffic makes up the majority of the con institution = {Institut Eurecom}, type = {Tech report}, address = {Sophia Antipolis}, - abstract = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. 
We have been crawling KAD continuously for about six months and obtained information about geographical distribution of peers, session times, peer availability, and peer lifetime. We also evaluated to what extent information about past peer uptime can be used to predict the remaining uptime of the peer. Peers are identified by the so called KAD ID, which was up to now as- sumed to remain the same across sessions. However, we observed that this is not the case: There is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover. By tracking end-users with static IP addresses, we could measure the rate of change of KAD ID per end-user. -}, + abstract = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about geographical distribution of peers, session times, peer availability, and peer lifetime. We also evaluated to what extent information about past peer uptime can be used to predict the remaining uptime of the peer. Peers are identified by the so called KAD ID, which was up to now as- sumed to remain the same across sessions. However, we observed that this is not the case: There is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover. 
By tracking end-users with static IP addresses, we could measure the rate of change of KAD ID per end-user}, keywords = {distributed hash table, KAD, peer behavior}, issn = {RR-07-205}, url = {http://www.eurecom.fr/~btroup/kadtraces/}, @@ -4751,8 +4694,7 @@ While large-scale multiplayer games have typically used a client/server architec We propose solving the consistency problem through secure and scalable event ordering. While traditional event ordering requires all-to-all message passing and at least two rounds of communication, we argue that multiplayer games lend themselves naturally to a hierarchical decomposition of their state space so that we can reduce the communication cost of event ordering. We also argue that by using cryptography, a discrete view of time, and majority voting, we can totally order events in a real-time setting. By applying these two concepts, we can scale multiplayer games to millions of players. -We develop our solution in two parts: a cheat-proof and real-time event ordering protocol and a scalable, hierarchical structure that organizes peers in a tree according to their scope of interest in the game. Our work represents the first, complete solution to this problem and we show through both proofs and simulations that our protocols allow the creation of large-scale, peer-to-peer games that are resistant to cheating while maintaining real-time responsiveness in the system. -}, +We develop our solution in two parts: a cheat-proof and real-time event ordering protocol and a scalable, hierarchical structure that organizes peers in a tree according to their scope of interest in the game. 
Our work represents the first, complete solution to this problem and we show through both proofs and simulations that our protocols allow the creation of large-scale, peer-to-peer games that are resistant to cheating while maintaining real-time responsiveness in the system}, url = {http://portal.acm.org/citation.cfm?id=1329865$\#$}, author = {Chis GauthierDickey} } @@ -4810,8 +4752,7 @@ We develop our solution in two parts: a cheat-proof and real-time event ordering address = {New York, NY, USA}, abstract = {The Internet{\textquoteright}s routing system is facing stresses due to its poor fundamental scaling properties. Compact routing is a research field that studies fundamental limits of routing scalability and designs algorithms that try to meet these limits. In particular, compact routing research shows that shortest-path routing, forming a core of traditional routing algorithms, cannot guarantee routing table (RT) sizes that on all network topologies grow slower than linearly as functions of the network size. However, there are plenty of compact routing schemes that relax the shortest-path requirement and allow for improved, sublinear RT size scaling that is mathematically provable for all static network topologies. In particular, there exist compact routing schemes designed for grids, trees, and Internet-like topologies that offer RT sizes that scale logarithmically with the network size. -In this paper, we demonstrate that in view of recent results in compact routing research, such logarithmic scaling on Internet-like topologies is fundamentally impossible in the presence of topology dynamics or topology-independent (flat) addressing. We use analytic arguments to show that the number of routing control messages per topology change cannot scale better than linearly on Internet-like topologies. 
We also employ simulations to confirm that logarithmic RT size scaling gets broken by topology-independent addressing, a cornerstone of popular locator-identifier split proposals aiming at improving routing scaling in the presence of network topology dynamics or host mobility. These pessimistic findings lead us to the conclusion that a fundamental re-examination of assumptions behind routing models and abstractions is needed in order to find a routing architecture that would be able to scale "indefinitely. -}, +In this paper, we demonstrate that in view of recent results in compact routing research, such logarithmic scaling on Internet-like topologies is fundamentally impossible in the presence of topology dynamics or topology-independent (flat) addressing. We use analytic arguments to show that the number of routing control messages per topology change cannot scale better than linearly on Internet-like topologies. We also employ simulations to confirm that logarithmic RT size scaling gets broken by topology-independent addressing, a cornerstone of popular locator-identifier split proposals aiming at improving routing scaling in the presence of network topology dynamics or host mobility. 
These pessimistic findings lead us to the conclusion that a fundamental re-examination of assumptions behind routing models and abstractions is needed in order to find a routing architecture that would be able to scale "indefinitely"}, keywords = {compact routing, internet routing, routing scalability}, issn = {0146-4833}, doi = {10.1145/1273445.1273450}, @@ -4883,8 +4824,7 @@ This thesis demonstrates how theoretical models and generic methodologies relati month = {October}, publisher = {ACM New York, NY, USA}, organization = {ACM New York, NY, USA}, - abstract = {We consider the effect attackers who disrupt anonymous communications have on the security of traditional high- and low-latency anonymous communication systems, as well as on the Hydra-Onion and Cashmere systems that aim to offer reliable mixing, and Salsa, a peer-to-peer anonymous communication network. We show that denial of service (DoS) lowers anonymity as messages need to get retransmitted to be delivered, presenting more opportunities for attack. We uncover a fundamental limit on the security of mix networks, showing that they cannot tolerate a majority of nodes being malicious. 
Cashmere, Hydra-Onion, and Salsa security is also badly affected by DoS attackers. Our results are backed by probabilistic modeling and extensive simulations and are of direct applicability to deployed anonymity systems}, keywords = {anonymity, attack, denial-of-service, reliability}, isbn = {978-1-59593-703-2}, doi = {10.1145/1315245.1315258}, @@ -5371,8 +5311,7 @@ that the map of a message to an elliptic curve point can be made without any modification. This paper provides a solution to the open problem posed in [7] concerning the creation of a deterministic method to map arbitrary -message to an elliptic curve. -}, +message to an elliptic curve}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ijns-2009-v8-n2-p169-176.pdf}, author = {Brian King} } @@ -5725,8 +5664,7 @@ We also discuss various proposed countermeasures designed to detect, thwart or l Today with the advent of Peer-to-Peer technology, distributed file systems that work on top of Peer-to-Peer systems can be built. These systems can be built with no or much less centralised components and are usable on a global scale. The System Architecture Group at the University of Karlsruhe in Germany has developedsuch a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information. One problem with this approach is, that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way. -Usually the strategy to solve this type of problem is an encrypted multicast. This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al. -}, +Usually the strategy to solve this type of problem is an encrypted multicast. 
This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al}, keywords = {distributed file system, distributed hash table, peer-to-peer networking, store information}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf}, author = {Bernhard Amann} @@ -5834,7 +5772,7 @@ Usually the strategy to solve this type of problem is an encrypted multicast. Th publisher = {Springer-Verlag}, organization = {Springer-Verlag}, address = {Tobago}, - abstract = {Private keyword search is a technique that allows for searching and retrieving documents matching certain keywords without revealing the search criteria. We improve the space efficiency of the Ostrovsky et al. Private Search [9] scheme, by describing methods that require considerably shorter buffers for returning the results of the search. Our basic decoding scheme recursive extraction, requires buffers of length less than twice the number of returned results and is still simple and highly efficient. Our extended decoding schemes rely on solving systems of simultaneous equations, and in special cases can uncover documents in buffers that are close to 95 \% full. Finally we note the similarity between our decoding techniques and the ones used to decode rateless codes, and show how such codes can be extracted from encrypted documents. }, + abstract = {Private keyword search is a technique that allows for searching and retrieving documents matching certain keywords without revealing the search criteria. We improve the space efficiency of the Ostrovsky et al. Private Search [9] scheme, by describing methods that require considerably shorter buffers for returning the results of the search. 
Our basic decoding scheme recursive extraction, requires buffers of length less than twice the number of returned results and is still simple and highly efficient. Our extended decoding schemes rely on solving systems of simultaneous equations, and in special cases can uncover documents in buffers that are close to 95 \% full. Finally we note the similarity between our decoding techniques and the ones used to decode rateless codes, and show how such codes can be extracted from encrypted documents}, keywords = {keywords, privacy}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.130.7014}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/privsearch-aeolus.pdf}, @@ -5889,8 +5827,7 @@ Usually the strategy to solve this type of problem is an encrypted multicast. Th organization = {USENIX Association Berkeley, CA, USA}, abstract = {This paper investigates the problem of designing anonymity networks that meet application-specific performance and security constraints. We argue that existing anonymity networks take a narrow view of performance by considering only the strength of the offered anonymity. However, real-world applications impose a myriad of communication requirements, including end-to-end bandwidth and latency, trustworthiness of intermediary routers, and network jitter. -We pose a grand challenge for anonymity: the development of a network architecture that enables applications to customize routes that tradeoff between anonymity and performance. Towards this challenge, we present the Application-Aware Anonymity (A3) routing service. We envision that A3 will serve as a powerful and flexible anonymous communications layer that will spur the future development of anonymity services. -}, +We pose a grand challenge for anonymity: the development of a network architecture that enables applications to customize routes that tradeoff between anonymity and performance. 
Towards this challenge, we present the Application-Aware Anonymity (A3) routing service. We envision that A3 will serve as a powerful and flexible anonymous communications layer that will spur the future development of anonymity services}, keywords = {anonymity, routing}, url = {http://portal.acm.org/citation.cfm?id=1361423}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/a3.pdf}, @@ -5922,7 +5859,7 @@ We pose a grand challenge for anonymity: the development of a network architectu publisher = {Springer-Verlag}, organization = {Springer-Verlag}, address = {Saint-Malo,FR}, - abstract = {A continuously-observable steganographic file system allows to remotely store user files on a raw storage device; the security goal is to offer plausible deniability even when the raw storage device is continuously monitored by an attacker. Zhou, Pang and Tan have proposed such a system in [7] with a claim of provable security against traffic analysis. In this paper, we disprove their claims by presenting traffic analysis attacks on the file update algorithm of Zhou et al. Our attacks are highly effective in detecting file updates and revealing the existence and location of files. For multi-block files, we show that two updates are sufficient to discover the file. One-block files accessed a sufficient number of times can also be revealed. Our results suggest that simple randomization techniques are not sufficient to protect steganographic file systems from traffic analysis attacks. }, + abstract = {A continuously-observable steganographic file system allows to remotely store user files on a raw storage device; the security goal is to offer plausible deniability even when the raw storage device is continuously monitored by an attacker. Zhou, Pang and Tan have proposed such a system in [7] with a claim of provable security against traffic analysis. 
In this paper, we disprove their claims by presenting traffic analysis attacks on the file update algorithm of Zhou et al. Our attacks are highly effective in detecting file updates and revealing the existence and location of files. For multi-block files, we show that two updates are sufficient to discover the file. One-block files accessed a sufficient number of times can also be revealed. Our results suggest that simple randomization techniques are not sufficient to protect steganographic file systems from traffic analysis attacks }, keywords = {traffic analysis}, isbn = {978-3-540-77369-6}, doi = {10.1007/978-3-540-77370-2}, @@ -5938,7 +5875,7 @@ We pose a grand challenge for anonymity: the development of a network architectu publisher = {Springer}, organization = {Springer}, address = {Ottawa, Canada}, - abstract = { We introduce a new traffic analysis attack: the Two-sided Statistical Disclosure Attack, that tries to uncover the receivers of messages sent through an anonymizing network supporting anonymous replies. We provide an abstract model of an anonymity system with users that reply to messages. Based on this model, we propose a linear approximation describing the likely receivers of sent messages. Using simulations, we evaluate the new attack given different traffic characteristics and we show that it is superior to previous attacks when replies are routed in the system. }, + abstract = { We introduce a new traffic analysis attack: the Two-sided Statistical Disclosure Attack, that tries to uncover the receivers of messages sent through an anonymizing network supporting anonymous replies. We provide an abstract model of an anonymity system with users that reply to messages. Based on this model, we propose a linear approximation describing the likely receivers of sent messages. 
Using simulations, we evaluate the new attack given different traffic characteristics and we show that it is superior to previous attacks when replies are routed in the system }, keywords = {anonymity, traffic analysis}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.78.7347}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/danezis-pet2007.pdf}, @@ -6314,8 +6251,7 @@ In this paper we review SSR{\textquoteright}s self-organizing features and demon year = {2006}, pages = {239{\textendash}248}, publisher = {IEEE Computer Society Washington, DC, USA}, - abstract = {There is a vast body of work on implementing anonymous communication. In this paper, we study the possibility of using anonymous communication as a building block, and show that one can leverage on anonymity in a variety of cryptographic contexts. Our results go in two directions.--Feasibility. We show that anonymous communication over insecure channels can be used to implement unconditionally secure point-to-point channels, broadcast, and generalmulti-party protocols that remain unconditionally secure as long as less than half of the players are maliciously corrupted.--Efficiency. We show that anonymous channels can yield substantial efficiency improvements for several natural secure computation tasks. In particular, we present the first solution to the problem of private information retrieval (PIR) which can handle multiple users while being close to optimal with respect to both communication and computation.A key observation that underlies these results is that local randomization of inputs, via secret-sharing, when combined with the global mixing of the shares, provided by anonymity, allows to carry out useful computations on the inputs while keeping the inputs private. -}, + abstract = {There is a vast body of work on implementing anonymous communication. 
In this paper, we study the possibility of using anonymous communication as a building block, and show that one can leverage on anonymity in a variety of cryptographic contexts. Our results go in two directions.--Feasibility. We show that anonymous communication over insecure channels can be used to implement unconditionally secure point-to-point channels, broadcast, and general multi-party protocols that remain unconditionally secure as long as less than half of the players are maliciously corrupted.--Efficiency. We show that anonymous channels can yield substantial efficiency improvements for several natural secure computation tasks. In particular, we present the first solution to the problem of private information retrieval (PIR) which can handle multiple users while being close to optimal with respect to both communication and computation. A key observation that underlies these results is that local randomization of inputs, via secret-sharing, when combined with the global mixing of the shares, provided by anonymity, allows to carry out useful computations on the inputs while keeping the inputs private}, keywords = {anonymity, private information retrieval}, isbn = {0-7695-2720-5}, issn = {0272-5428}, @@ -6467,7 +6403,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar year = {2006}, publisher = {SIAM}, organization = {SIAM}, - abstract = {Theoretical basis for the routing protocol of Freenet 0.7.
}, + abstract = {Theoretical basis for the routing protocol of Freenet 0.7 }, keywords = {small-world}, url = {http://www.math.chalmers.se/~ossa/wrt.html}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/swroute.pdf}, @@ -6652,8 +6588,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar month = {October}, publisher = {ACM New York, NY, USA}, organization = {ACM New York, NY, USA}, - abstract = {Location-hidden services, as offered by anonymity systems such as Tor, allow servers to be operated under a pseudonym. As Tor is an overlay network, servers hosting hidden services are accessible both directly and over the anonymous channel. Traffic patterns through one channel have observable effects on the other, thus allowing a service{\textquoteright}s pseudonymous identity and IP address to be linked. One proposed solution to this vulnerability is for Tor nodes to provide fixed quality of service to each connection, regardless of other traffic, thus reducing capacity but resisting such interference attacks. However, even if each connection does not influence the others, total throughput would still affect the load on the CPU, and thus its heat output. Unfortunately for anonymity, the result of temperature on clock skew can be remotely detected through observing timestamps. This attack works because existing abstract models of anonymity-network nodes do not take into account the inevitable imperfections of the hardware they run on. Furthermore, we suggest the same technique could be exploited as a classical covert channel and can even provide geolocation. -}, + abstract = {Location-hidden services, as offered by anonymity systems such as Tor, allow servers to be operated under a pseudonym. As Tor is an overlay network, servers hosting hidden services are accessible both directly and over the anonymous channel. 
Traffic patterns through one channel have observable effects on the other, thus allowing a service{\textquoteright}s pseudonymous identity and IP address to be linked. One proposed solution to this vulnerability is for Tor nodes to provide fixed quality of service to each connection, regardless of other traffic, thus reducing capacity but resisting such interference attacks. However, even if each connection does not influence the others, total throughput would still affect the load on the CPU, and thus its heat output. Unfortunately for anonymity, the result of temperature on clock skew can be remotely detected through observing timestamps. This attack works because existing abstract models of anonymity-network nodes do not take into account the inevitable imperfections of the hardware they run on. Furthermore, we suggest the same technique could be exploited as a classical covert channel and can even provide geolocation}, keywords = {anonymity, clock skew, covert channels, fingerprinting, Tor}, isbn = {1-59593-518-5}, doi = {10.1145/1180405.1180410}, @@ -6669,8 +6604,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar publisher = {ACM Press}, organization = {ACM Press}, address = {New York, NY, USA}, - abstract = {We create a credential system that lets a user anonymously authenticate at most $n$ times in a single time period. A user withdraws a dispenser of n e-tokens. She shows an e-token to a verifier to authenticate herself; each e-token can be used only once, however, the dispenser automatically refreshes every time period. The only prior solution to this problem, due to Damg{\r a}rd et al. [29], uses protocols that are a factor of k slower for the user and verifier, where k is the security parameter. Damg{\r a}rd et al. also only support one authentication per time period, while we support n. 
Because our construction is based on e-cash, we can use existing techniques to identify a cheating user, trace all of her e-tokens, and revoke her dispensers. We also offer a new anonymity service: glitch protection for basically honest users who (occasionally) reuse e-tokens. The verifier can always recognize a reused e-token; however, we preserve the anonymity of users who do not reuse e-tokens too often. -},} + abstract = {We create a credential system that lets a user anonymously authenticate at most $n$ times in a single time period. A user withdraws a dispenser of n e-tokens. She shows an e-token to a verifier to authenticate herself; each e-token can be used only once, however, the dispenser automatically refreshes every time period. The only prior solution to this problem, due to Damg{\r a}rd et al. [29], uses protocols that are a factor of k slower for the user and verifier, where k is the security parameter. Damg{\r a}rd et al. also only support one authentication per time period, while we support n. Because our construction is based on e-cash, we can use existing techniques to identify a cheating user, trace all of her e-tokens, and revoke her dispensers. We also offer a new anonymity service: glitch protection for basically honest users who (occasionally) reuse e-tokens. The verifier can always recognize a reused e-token; however, we preserve the anonymity of users who do not reuse e-tokens too often}, keywords = {clone detection, credentials, n-anonymous authentication}, isbn = {1-59593-518-5}, doi = {10.1145/1180405.1180431}, @@ -6703,8 +6637,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar publisher = {Springer}, organization = {Springer}, address = {Cambridge, UK}, - abstract = {The so-called {\textquotedblleft}Great Firewall of China{\textquotedblright} operates, in part, by inspecting TCP packets for keywords that are to be blocked.
If the keyword is present, TCP reset packets (viz: with the RST flag set) are sent to both endpoints of the connection, which then close. However, because the original packets are passed through the firewall unscathed, if the endpoints completely ignore the firewall{\textquoteright}s resets, then the connection will proceed unhindered. Once one connection has been blocked, the firewall makes further easy-to-evade attempts to block further connections from the same machine. This latter behaviour can be leveraged into a denial-of-service attack on third-party machines. -}, + abstract = {The so-called {\textquotedblleft}Great Firewall of China{\textquotedblright} operates, in part, by inspecting TCP packets for keywords that are to be blocked. If the keyword is present, TCP reset packets (viz: with the RST flag set) are sent to both endpoints of the connection, which then close. However, because the original packets are passed through the firewall unscathed, if the endpoints completely ignore the firewall{\textquoteright}s resets, then the connection will proceed unhindered. Once one connection has been blocked, the firewall makes further easy-to-evade attempts to block further connections from the same machine. This latter behaviour can be leveraged into a denial-of-service attack on third-party machines}, isbn = {978-3-540-68790-0}, doi = {10.1007/11957454}, url = {http://www.springerlink.com/content/7224582654260k03/}, @@ -6744,7 +6677,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar publisher = {ACM}, organization = {ACM}, address = {Ann Arbor, Michigan, USA}, - abstract = {In this paper we argue that a robust incentive mechanism is important in a real-world peer-to-peer streaming system to ensure that nodes contribute as much upload bandwidth as they can. 
We show that simple tit-for-tat mechanisms which work well in file-sharing systems like BitTorrent do not perform well given the additional delay and bandwidth constraints imposed by live streaming. We present preliminary experimental results for an incentive mechanism based on the Iterated Prisoner{\textquoteright}s Dilemma problem that allows all nodes to download with low packet loss when there is sufficient capacity in the system, but when the system is resource-starved, nodes that contribute upload bandwidth receive better service than those that do not. Moreover, our algorithm does not require nodes to rely on any information other than direct observations of its neighbors {\textquoteright} behavior towards it. }, + abstract = {In this paper we argue that a robust incentive mechanism is important in a real-world peer-to-peer streaming system to ensure that nodes contribute as much upload bandwidth as they can. We show that simple tit-for-tat mechanisms which work well in file-sharing systems like BitTorrent do not perform well given the additional delay and bandwidth constraints imposed by live streaming. We present preliminary experimental results for an incentive mechanism based on the Iterated Prisoner{\textquoteright}s Dilemma problem that allows all nodes to download with low packet loss when there is sufficient capacity in the system, but when the system is resource-starved, nodes that contribute upload bandwidth receive better service than those that do not. Moreover, our algorithm does not require nodes to rely on any information other than direct observations of its neighbors {\textquoteright} behavior towards it }, keywords = {peer-to-peer streaming, tit-for-tat}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/NetEcon\%2706\%20-\%20Improving\%20robustness\%20of\%20p2p\%20streaming.pdf}, author = {Vinay Pai and Alexander E. 
Mohr} @@ -6939,8 +6872,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar month = {October}, publisher = {ACM New York, NY, USA}, organization = {ACM New York, NY, USA}, - abstract = {Many applications of mix networks such as anonymousWeb browsing require relationship anonymity: it should be hard for the attacker to determine who is communicating with whom. Conventional methods for measuring anonymity, however, focus on sender anonymity instead. Sender anonymity guarantees that it is difficult for the attacker to determine the origin of any given message exiting the mix network, but this may not be sufficient to ensure relationship anonymity. Even if the attacker cannot identify the origin of messages arriving to some destination, relationship anonymity will fail if he can determine with high probability that at least one of the messages originated from a particular sender, without necessarily being able to recognize this message among others. We give a formal definition and a calculation methodology for relationship anonymity. Our techniques are similar to those used for sender anonymity, but, unlike sender anonymity, relationship anonymity is sensitive to the distribution of message destinations. In particular, Zipfian distributions with skew values characteristic of Web browsing provide especially poor relationship anonymity. Our methodology takes route selection algorithms into account, and incorporates information-theoretic metrics such as entropy and min-entropy. We illustrate our methodology by calculating relationship anonymity in several simulated mix networks. -}, + abstract = {Many applications of mix networks such as anonymousWeb browsing require relationship anonymity: it should be hard for the attacker to determine who is communicating with whom. Conventional methods for measuring anonymity, however, focus on sender anonymity instead. 
Sender anonymity guarantees that it is difficult for the attacker to determine the origin of any given message exiting the mix network, but this may not be sufficient to ensure relationship anonymity. Even if the attacker cannot identify the origin of messages arriving to some destination, relationship anonymity will fail if he can determine with high probability that at least one of the messages originated from a particular sender, without necessarily being able to recognize this message among others. We give a formal definition and a calculation methodology for relationship anonymity. Our techniques are similar to those used for sender anonymity, but, unlike sender anonymity, relationship anonymity is sensitive to the distribution of message destinations. In particular, Zipfian distributions with skew values characteristic of Web browsing provide especially poor relationship anonymity. Our methodology takes route selection algorithms into account, and incorporates information-theoretic metrics such as entropy and min-entropy. We illustrate our methodology by calculating relationship anonymity in several simulated mix networks}, keywords = {anonymity, privacy}, isbn = {1-59593-556-8}, doi = {10.1145/1179601.1179611}, @@ -7248,8 +7180,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased pages = {2551{\textendash}2567}, publisher = {IEEE Press}, address = {Piscataway, NJ, USA}, - abstract = {LT-codes are a new class of codes introduced by Luby for the purpose of scalable and fault-tolerant distribution of data over computer networks. In this paper, we introduce Raptor codes, an extension of LT-codes with linear time encoding and decoding. We will exhibit a class of universal Raptor codes: for a given integer k and any real ε > 0, Raptor codes in this class produce a potentially infinite stream of symbols such that any subset of symbols of size k(1 + ε) is sufficient to recover the original k symbols with high probability. 
Each output symbol is generated using O(log(1/ ε)) operations, and the original symbols are recovered from the collected ones with O(k log(1/ε)) operations.We will also introduce novel techniques for the analysis of the error probability of the decoder for finite length Raptor codes. Moreover, we will introduce and analyze systematic versions of Raptor codes, i.e., versions in which the first output elements of the coding system coincide with the original k elements. -}, + abstract = {LT-codes are a new class of codes introduced by Luby for the purpose of scalable and fault-tolerant distribution of data over computer networks. In this paper, we introduce Raptor codes, an extension of LT-codes with linear time encoding and decoding. We will exhibit a class of universal Raptor codes: for a given integer k and any real ε > 0, Raptor codes in this class produce a potentially infinite stream of symbols such that any subset of symbols of size k(1 + ε) is sufficient to recover the original k symbols with high probability. Each output symbol is generated using O(log(1/ ε)) operations, and the original symbols are recovered from the collected ones with O(k log(1/ε)) operations.We will also introduce novel techniques for the analysis of the error probability of the decoder for finite length Raptor codes. Moreover, we will introduce and analyze systematic versions of Raptor codes, i.e., versions in which the first output elements of the coding system coincide with the original k elements}, keywords = {802.11, encoding, erasure coding}, issn = {1063-6692}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/raptor.pdf}, @@ -7276,7 +7207,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased pages = {213{\textendash}223}, publisher = {Emerald Group Publishing Limited}, type = {Journal}, - abstract = {The (n-1) attack is the most powerful attack against mix which is the basic building block of many modern anonymous systems. 
This paper aims to present a strategy that can be implemented in mix networks to detect and counter the active attacks, especially the (n-1) attack and its variants. }, + abstract = {The (n-1) attack is the most powerful attack against mix which is the basic building block of many modern anonymous systems. This paper aims to present a strategy that can be implemented in mix networks to detect and counter the active attacks, especially the (n-1) attack and its variants }, keywords = {anonymity, mix, privacy}, issn = {1066-2243 }, doi = {10.1108/10662240610656528}, @@ -7337,8 +7268,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased pages = {1-52}, abstract = {Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to share their {\textquotedblleft}private{\textquotedblright} information due to fear of adverse impact of information leakage. This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demandsignal (e.g., promotions) and cost information to one another. In our collaborative CPFR) scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailers and the suppliers observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. 
Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for -collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the others future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation. -}, +collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the others future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation}, keywords = {chain computation management, CPFR, privacy, secure multi-party computation, secure supply, security, SMC}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf}, author = {Atallah, Mikhail and Marina Blanton and Vinayak Deshpand and Frikken, Keith and Li, Jiangtao and Leroy Schwarz} @@ -7571,8 +7501,7 @@ This result immediately implies solutions to other long-standing open problems s publisher = {Springer}, organization = {Springer}, address = {Cambridge, UK}, - abstract = {Location hidden services have received increasing attention as a means to resist censorship and protect the identity of service operators. Research and vulnerability analysis to date has mainly focused on how to locate the hidden service. But while the hiding techniques have improved, almost no progress has been made in increasing the resistance against DoS attacks directly or indirectly on hidden services. 
In this paper we suggest improvements that should be easy to adopt within the existing hidden service design, improvements that will both reduce vulnerability to DoS attacks and add QoS as a service option. In addition we show how to hide not just the location but the existence of the hidden service from everyone but the users knowing its service address. Not even the public directory servers will know how a private hidden service can be contacted, or know it exists. -}, + abstract = {Location hidden services have received increasing attention as a means to resist censorship and protect the identity of service operators. Research and vulnerability analysis to date has mainly focused on how to locate the hidden service. But while the hiding techniques have improved, almost no progress has been made in increasing the resistance against DoS attacks directly or indirectly on hidden services. In this paper we suggest improvements that should be easy to adopt within the existing hidden service design, improvements that will both reduce vulnerability to DoS attacks and add QoS as a service option. In addition we show how to hide not just the location but the existence of the hidden service from everyone but the users knowing its service address. Not even the public directory servers will know how a private hidden service can be contacted, or know it exists}, keywords = {censorship resistance, information hiding}, isbn = {978-3-540-68790-0}, doi = {10.1007/11957454}, @@ -7589,8 +7518,7 @@ This result immediately implies solutions to other long-standing open problems s year = {2006}, pages = {241{\textendash}255}, publisher = {Springer}, - abstract = {A shuffle takes a list of ciphertexts and outputs a permuted list of re-encryptions of the input ciphertexts. Mix-nets, a popular method for anonymous routing, can be constructed from a sequence of shuffles and decryption. 
We propose a formal model for security of verifiable shuffles and a new verifiable shuffle system based on the Paillier encryption scheme, and prove its security in the proposed dmodel. The model is general and can be extended to provide provable security for verifiable shuffle decryption. -}, + abstract = {A shuffle takes a list of ciphertexts and outputs a permuted list of re-encryptions of the input ciphertexts. Mix-nets, a popular method for anonymous routing, can be constructed from a sequence of shuffles and decryption. We propose a formal model for security of verifiable shuffles and a new verifiable shuffle system based on the Paillier encryption scheme, and prove its security in the proposed model. The model is general and can be extended to provide provable security for verifiable shuffle decryption}, keywords = {formal security model, paillier public-key system, privacy, verifiable shuffles}, issn = {1615-5262}, doi = {10.1007/s10207-006-0004-8}, @@ -7669,8 +7597,7 @@ We show that applying encoding based on universal re-encryption can solve many o publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, - abstract = {This paper evaluates the ability of a wireless mesh architecture to provide high performance Internet access while demanding little deployment planning or operational management. The architecture considered in this paper has unplanned node placement (rather than planned topology), omni-directional antennas (rather than directional links), and multi-hop routing (rather than single-hop base stations). These design decisions contribute to ease of deployment, an important requirement for community wireless networks. However, this architecture carries the risk that lack of planning might render the network{\textquoteright}s performance unusably low.
For example, it might be necessary to place nodes carefully to ensure connectivity; the omni-directional antennas might provide uselessly short radio ranges; or the inefficiency of multi-hop forwarding might leave some users effectively disconnected.The paper evaluates this unplanned mesh architecture with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37 nodes spread over four square kilometers of an urban area. The network provides users with usable performance despite lack of planning: the average inter-node throughput is 627 kbits/second, even though the average route has three hops.The paper evaluates multiple aspects of the architecture: the effect of node density on connectivity and throughput; the characteristics of the links that the routing protocol elects to use; the usefulness of the highly connected mesh afforded by omni-directional antennas for robustness and throughput; and the potential performance of a single-hop network using the same nodes as Roofnet. -}, + abstract = {This paper evaluates the ability of a wireless mesh architecture to provide high performance Internet access while demanding little deployment planning or operational management. The architecture considered in this paper has unplanned node placement (rather than planned topology), omni-directional antennas (rather than directional links), and multi-hop routing (rather than single-hop base stations). These design decisions contribute to ease of deployment, an important requirement for community wireless networks. However, this architecture carries the risk that lack of planning might render the network{\textquoteright}s performance unusably low. 
For example, it might be necessary to place nodes carefully to ensure connectivity; the omni-directional antennas might provide uselessly short radio ranges; or the inefficiency of multi-hop forwarding might leave some users effectively disconnected.The paper evaluates this unplanned mesh architecture with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37 nodes spread over four square kilometers of an urban area. The network provides users with usable performance despite lack of planning: the average inter-node throughput is 627 kbits/second, even though the average route has three hops.The paper evaluates multiple aspects of the architecture: the effect of node density on connectivity and throughput; the characteristics of the links that the routing protocol elects to use; the usefulness of the highly connected mesh afforded by omni-directional antennas for robustness and throughput; and the potential performance of a single-hop network using the same nodes as Roofnet}, keywords = {ad-hoc networks, mesh networks, multi-hop networks, route metrics, wireless routing}, isbn = {1-59593-020-5}, doi = {10.1145/1080829.1080833}, @@ -8195,8 +8122,7 @@ We provide a formal definition of onion-routing in the universally composable fr address = {Aarhus, Denmark }, abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as set of descriptive attributes. A Fuzzy IBE scheme allows for a private key for an identity, ω, to decrypt a ciphertext encrypted with an identity, ω , if and only if the identities ω and ω are close to each other as measured by the {\textquotedblleft}set overlap{\textquotedblright} distance metric. 
A Fuzzy IBE scheme can be applied to enable encryption using biometric inputs as identities; the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for the use of biometric identities, which inherently will have some noise each time they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of application that we term {\textquotedblleft}attribute-based encryption{\textquotedblright}. -In this paper we present two constructions of Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based Encryption of a message under several attributes that compose a (fuzzy) identity. Our IBE schemes are both error-tolerant and secure against collusion attacks. Additionally, our basic construction does not use random oracles. We prove the security of our schemes under the Selective-ID security model. -}, +In this paper we present two constructions of Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based Encryption of a message under several attributes that compose a (fuzzy) identity. Our IBE schemes are both error-tolerant and secure against collusion attacks. Additionally, our basic construction does not use random oracles. We prove the security of our schemes under the Selective-ID security model}, keywords = {Fuzzy IBE, IBE}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/EUROCRYPT\%2705\%20-\%20Fuzzy\%20Identity-Based\%20Encryption.pdf}, author = {Amit Sahai and Waters, Brent} @@ -8296,8 +8222,7 @@ In this paper we present two constructions of Fuzzy IBE schemes. Our constructio publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, - abstract = {This paper introduces Hydra, a platform that we are developing for highly survivable and secure data storage systems that distribute information over networks and adapt timely to environment changes, enabling users to store and access critical data in a continuously available and highly trustable fashion. 
The Hydra platform uses MDS array codes that can be encoded and decoded efficiently for distributing and recovering user data. Novel uses of MDS array codes in Hydra are discussed, as well as Hydra{\textquoteright}s design goals, general structures and a set of basic operations on user data. We also explore Hydra{\textquoteright}s applications in survivable and secure data storage systems. -}, + abstract = {This paper introduces Hydra, a platform that we are developing for highly survivable and secure data storage systems that distribute information over networks and adapt timely to environment changes, enabling users to store and access critical data in a continuously available and highly trustable fashion. The Hydra platform uses MDS array codes that can be encoded and decoded efficiently for distributing and recovering user data. Novel uses of MDS array codes in Hydra are discussed, as well as Hydra{\textquoteright}s design goals, general structures and a set of basic operations on user data. We also explore Hydra{\textquoteright}s applications in survivable and secure data storage systems}, keywords = {storage}, isbn = {1-59593-233-X}, doi = {10.1145/1103780.1103797}, @@ -9083,8 +9008,7 @@ In this paper, we present a fully self-organizing routing scheme that is able to pages = {188-197}, publisher = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, - abstract = {We propose a service discovery architecture called VSD (service discovery based on volunteers) for heterogeneous and dynamic pervasive computing environments. The proposed architecture uses a small subset of the nodes called volunteers that perform directory services. Relatively stable and capable nodes serve as volunteers, thus recognizing node heterogeneity in terms of mobility and capability. We discuss characteristics of VSD architecture and methods to improve connectivity among volunteers for higher discovery rate. 
By showing that VSD performs quite well compared to a broadcast based scheme in MANET scenarios, we validate that VSD is a flexible and adaptable architecture appropriate for dynamic pervasive computing environments. VSD incorporates several novel features: i) handles dynamism and supports self-reconfiguration; ii) provides physical locality and scalability; and iii) improves reliability and copes with uncertainty through redundancy by forming overlapped clusters. -}, + abstract = {We propose a service discovery architecture called VSD (service discovery based on volunteers) for heterogeneous and dynamic pervasive computing environments. The proposed architecture uses a small subset of the nodes called volunteers that perform directory services. Relatively stable and capable nodes serve as volunteers, thus recognizing node heterogeneity in terms of mobility and capability. We discuss characteristics of VSD architecture and methods to improve connectivity among volunteers for higher discovery rate. By showing that VSD performs quite well compared to a broadcast based scheme in MANET scenarios, we validate that VSD is a flexible and adaptable architecture appropriate for dynamic pervasive computing environments. 
VSD incorporates several novel features: i) handles dynamism and supports self-reconfiguration; ii) provides physical locality and scalability; and iii) improves reliability and copes with uncertainty through redundancy by forming overlapped clusters}, isbn = {0-7803-9032-6}, doi = {10.1109/PERSER.2005.1506410}, url = {http://www.computer.org/portal/web/csdl/doi/10.1109/PERSER.2005.1506410}, @@ -9270,8 +9194,7 @@ In this paper we present a novel routing approach that is capable of handling co organization = {IEEE Computer Society Washington, DC, USA}, type = {publication}, address = {Sydney, Australia}, - abstract = {In this paper, we briefly present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in networks with {\textquoteright}random topology{\textquoteright}. This algorithm enables sensor networks to use recent peer to-peer mechanisms from the field of overlay networks, like e.g. distributed hash tables and indirection infrastructures. Unlike other proposals along that direction, SSR integrates all necessary routing tasks into one simple, highly efficient routing protocol. Simulations demonstrate that in a small-world network with more than 100 000 nodes, SSR requires each node to only store routing data for 255 other nodes to establish routes between arbitrary pairs of nodes. These routes are on average only about 20-30\% longer than the globally optimal path between these nodes. -}, + abstract = {In this paper, we briefly present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in networks with {\textquoteright}random topology{\textquoteright}. This algorithm enables sensor networks to use recent peer to-peer mechanisms from the field of overlay networks, like e.g. distributed hash tables and indirection infrastructures. 
Unlike other proposals along that direction, SSR integrates all necessary routing tasks into one simple, highly efficient routing protocol. Simulations demonstrate that in a small-world network with more than 100 000 nodes, SSR requires each node to only store routing data for 255 other nodes to establish routes between arbitrary pairs of nodes. These routes are on average only about 20-30\% longer than the globally optimal path between these nodes}, keywords = {scalable source routing, topology matching}, isbn = {0-7803-9246-9}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, @@ -9372,7 +9295,7 @@ In this paper we propose an {\textquotedblleft}onion-like{\textquotedblright} en @conference {Mislove04ap3:cooperative, title = {AP3: Cooperative, decentralized anonymous communication}, booktitle = {IN PROC. OF SIGOPS EUROPEAN WORKSHOP}, - author = {Mislove, Alan and Oberoi, Gaurav and Post, Ansley and Reis, Charles and Druschel, Peter and Wallach, Dan S.}, + author = {Mislove, Alan and Oberoi, Gaurav and Post, Ansley and Reis, Charles and Druschel, Peter and Wallach, Dan S.}, year = {2004}, abstract = {This paper describes a cooperative overlay network that provides anonymous communication services for participating users. The Anonymizing Peer-to-Peer Proxy (AP3) system provides clients with three primitives: (i) anonymous message delivery, (ii) anonymous channels, and (iii) secure pseudonyms. AP3 is designed to be lightweight, low-cost and provides "probable innocence" anonymity to participating users, even under a large-scale coordinated attack by a limited fraction of malicious overlay nodes. 
Additionally, we use AP3{\textquoteright}s primitives to build novel anonymous group communication facilities (multicast and anycast), which shield the identity of both publishers and subscribers}, keywords = {anonymity, Peer-to-Peer Proxy}, @@ -9433,8 +9356,7 @@ In this paper we propose an {\textquotedblleft}onion-like{\textquotedblright} en pages = {11{\textendash}33}, publisher = {IEEE Computer Society Press}, address = {Los Alamitos, CA, USA}, - abstract = {This paper gives the main definitions relating to dependability, a generic concept including as special case such attributes as reliability, availability, safety, integrity, maintainability, etc. Security brings in concerns for confidentiality, in addition to availability and integrity. Basic definitions are given first. They are then commented upon, and supplemented by additional definitions, which address the threats to dependability and security (faults, errors, failures), their attributes, and the means for their achievement (fault prevention, fault tolerance, fault removal, fault forecasting). The aim is to explicate a set of general concepts, of relevance across a wide range of situations and, therefore, helping communication and cooperation among a number of scientific and technical communities, including ones that are concentrating on particular types of system, of system failures, or of causes of system failures. -}, + abstract = {This paper gives the main definitions relating to dependability, a generic concept including as special case such attributes as reliability, availability, safety, integrity, maintainability, etc. Security brings in concerns for confidentiality, in addition to availability and integrity. Basic definitions are given first. 
They are then commented upon, and supplemented by additional definitions, which address the threats to dependability and security (faults, errors, failures), their attributes, and the means for their achievement (fault prevention, fault tolerance, fault removal, fault forecasting). The aim is to explicate a set of general concepts, of relevance across a wide range of situations and, therefore, helping communication and cooperation among a number of scientific and technical communities, including ones that are concentrating on particular types of system, of system failures, or of causes of system failures}, keywords = {attack, fault removal, fault-tolerance, index terms-dependability, trust, vulnerability}, issn = {1545-5971}, doi = {10.1109/TDSC.2004.2}, @@ -9504,8 +9426,7 @@ We identify flaws in the software in Reliable that further compromise its abilit publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, - abstract = {In this paper we present a quantitative study of data survival in peer to peer storage systems. We first recall two main redundancy mechanisms: replication and erasure codes, which are used by most peer to peer storage systems like OceanStore, PAST or CFS, to guarantee data durability. Second we characterize peer to peer systems according to a volatility factor (a peer is free to leave the system at anytime) and to an availability factor (a peer is not permanently connected to the system). Third we model the behavior of a system as a Markov chain and analyse the average life time of data (MTTF) according to the volatility and availability factors. We also present the cost of the repair process based on these redundancy schemes to recover failed peers. The conclusion of this study is that when there is no high availability of peers, a simple replication scheme may be more efficient than sophisticated erasure codes. 
-}, + abstract = {In this paper we present a quantitative study of data survival in peer to peer storage systems. We first recall two main redundancy mechanisms: replication and erasure codes, which are used by most peer to peer storage systems like OceanStore, PAST or CFS, to guarantee data durability. Second we characterize peer to peer systems according to a volatility factor (a peer is free to leave the system at anytime) and to an availability factor (a peer is not permanently connected to the system). Third we model the behavior of a system as a Markov chain and analyse the average life time of data (MTTF) according to the volatility and availability factors. We also present the cost of the repair process based on these redundancy schemes to recover failed peers. The conclusion of this study is that when there is no high availability of peers, a simple replication scheme may be more efficient than sophisticated erasure codes}, keywords = {P2P, redundancy, storage}, isbn = {0-7803-8430-X}, url = {http://portal.acm.org/citation.cfm?id=1111777$\#$}, @@ -10496,8 +10417,7 @@ In this paper we improve these results: we show that the same level of unlinkabi publisher = {USENIX Association}, organization = {USENIX Association}, address = {Berkeley, CA, USA}, - abstract = {Ongoing advancements in technology lead to ever-increasing storage capacities. In spite of this, optimizing storage usage can still provide rich dividends. Several techniques based on delta-encoding and duplicate block suppression have been shown to reduce storage overheads, with varying requirements for resources such as computation and memory. We propose a new scheme for storage reduction that reduces data sizes with an effectiveness comparable to the more expensive techniques, but at a cost comparable to the faster but less effective ones. 
The scheme, called Redundancy Elimination at the Block Level (REBL), leverages the benefits of compression, duplicate block suppression, and delta-encoding to eliminate a broad spectrum of redundant data in a scalable and efficient manner. REBL generally encodes more compactly than compression (up to a factor of 14) and a combination of compression and duplicate suppression (up to a factor of 6.7). REBL also encodes similarly to a technique based on delta-encoding, reducing overall space significantly in one case. Furthermore, REBL uses super-fingerprints, a technique that reduces the data needed to identify similar blocks while dramatically reducing the computational requirements of matching the blocks: it turns O(n2) comparisons into hash table lookups. As a result, using super-fingerprints to avoid enumerating matching data objects decreases computation in the resemblance detection phase of REBL by up to a couple orders of magnitude. -}, + abstract = {Ongoing advancements in technology lead to ever-increasing storage capacities. In spite of this, optimizing storage usage can still provide rich dividends. Several techniques based on delta-encoding and duplicate block suppression have been shown to reduce storage overheads, with varying requirements for resources such as computation and memory. We propose a new scheme for storage reduction that reduces data sizes with an effectiveness comparable to the more expensive techniques, but at a cost comparable to the faster but less effective ones. The scheme, called Redundancy Elimination at the Block Level (REBL), leverages the benefits of compression, duplicate block suppression, and delta-encoding to eliminate a broad spectrum of redundant data in a scalable and efficient manner. REBL generally encodes more compactly than compression (up to a factor of 14) and a combination of compression and duplicate suppression (up to a factor of 6.7). 
REBL also encodes similarly to a technique based on delta-encoding, reducing overall space significantly in one case. Furthermore, REBL uses super-fingerprints, a technique that reduces the data needed to identify similar blocks while dramatically reducing the computational requirements of matching the blocks: it turns O(n2) comparisons into hash table lookups. As a result, using super-fingerprints to avoid enumerating matching data objects decreases computation in the resemblance detection phase of REBL by up to a couple orders of magnitude}, url = {http://portal.acm.org/citation.cfm?id=1247420$\#$}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.91.8331.pdf}, author = {Kulkarni, Purushottam and Douglis, Fred and Jason Lavoie and Tracey, John M.} @@ -10645,8 +10565,7 @@ This paper describes the design and implementation of a secure, reliable, and sc organization = {ACM Press}, abstract = {Developing sensor network applications demands a new set of tools to aid programmers. A number of simulation environments have been developed that provide varying degrees of scalability, realism, and detail for understanding the behavior of sensor networks. To date, however, none of these tools have addressed one of the most important aspects of sensor application design: that of power consumption. While simple approximations of overall power usage can be derived from estimates of node duty cycle and communication rates, these techniques often fail to capture the detailed, low-level energy requirements of the CPU, radio, sensors, and other peripherals. -In this paper, we present, a scalable simulation environment for wireless sensor networks that provides an accurate, per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an event-driven simulation environment for TinyOS applications. 
In PowerTOSSIM, TinyOS components corresponding to specific hardware peripherals (such as the radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each device{\textquoteright}s activity during the simulation runPowerTOSSIM employs a novel code-transformation technique to estimate the number of CPU cycles executed by each node, eliminating the need for expensive instruction-level simulation of sensor nodes. PowerTOSSIM includes a detailed model of hardware energy consumption based on the Mica2 sensor node platform. Through instrumentation of actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation of power consumption for a range of applications and scales to support very large simulations. -}, +In this paper, we present, a scalable simulation environment for wireless sensor networks that provides an accurate, per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an event-driven simulation environment for TinyOS applications. In PowerTOSSIM, TinyOS components corresponding to specific hardware peripherals (such as the radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each device{\textquoteright}s activity during the simulation runPowerTOSSIM employs a novel code-transformation technique to estimate the number of CPU cycles executed by each node, eliminating the need for expensive instruction-level simulation of sensor nodes. PowerTOSSIM includes a detailed model of hardware energy consumption based on the Mica2 sensor node platform. Through instrumentation of actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation of power consumption for a range of applications and scales to support very large simulations}, keywords = {sensor networks, TinyOS}, doi = {10.1145/1031495.1031518}, url = {http://portal.acm.org/citation.cfm?id=1031495.1031518}, @@ -11274,8 +11193,7 @@ participants in this context. 
The approach presented does not use credentials o GNUnet aims to provide anonymity for its users. Its design makes it hard to link a transaction to the node where it originated from. While anonymity requirements make a global view of the end-points of a transaction infeasible, the local link-to-link messages can be fully authenticated. Our economic model is based entirely on this local view of the network and takes only local -decisions. -}, +decisions}, keywords = {anonymity, file-sharing, GNUnet}, url = {http://grothoff.org/christian/ebe.pdf}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ebe.pdf}, @@ -11814,7 +11732,7 @@ We analyse the anonymity of connection-based systems against passive adversaries publisher = {IEEE Press}, organization = {IEEE Press}, address = {Seattle, Washington}, - abstract = {PlanetP is a peer-to-peer system in which searching content is done mostly locally. Every peer knows which content is available at which other peers. The index information is represented compactly using bloom filters and distributed throughout the network using push and pull mechanisms. }, + abstract = {PlanetP is a peer-to-peer system in which searching content is done mostly locally. Every peer knows which content is available at which other peers. The index information is represented compactly using bloom filters and distributed throughout the network using push and pull mechanisms }, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.6056\&rep=rep1\&type=url\&i=0}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/paper.dvi_.pdf}, author = {Francisco Matias Cuenca-Acuna and Christopher Peery and Richard P. Martin and Thu D. 
Nguyen} @@ -12254,7 +12172,7 @@ We provide a rigorous stochastic analysis of how much information is revealed by pages = {579{\textendash}592}, publisher = {Kluwer Academic Publishers}, address = {Hingham, MA, USA}, - abstract = {In military and rescue applications of mobile ad hoc networks, all the nodes belong to the same authority; therefore, they are motivated to cooperate in order to support the basic functions of the network. In this paper, we consider the case when each node is its own authority and tries to maximize the benefits it gets from the network. More precisely, we assume that the nodes are not willing to forward packets for the benefit of other nodes. This problem may arise in civilian applications of mobile ad hoc networks. In order to stimulate the nodes for packet forwarding, we propose a simple mechanism based on a counter in each node. We study the behavior of the proposed mechanism analytically and by means of simulations, and detail the way in which it could be protected against misuse. }, + abstract = {In military and rescue applications of mobile ad hoc networks, all the nodes belong to the same authority; therefore, they are motivated to cooperate in order to support the basic functions of the network. In this paper, we consider the case when each node is its own authority and tries to maximize the benefits it gets from the network. More precisely, we assume that the nodes are not willing to forward packets for the benefit of other nodes. This problem may arise in civilian applications of mobile ad hoc networks. In order to stimulate the nodes for packet forwarding, we propose a simple mechanism based on a counter in each node. 
We study the behavior of the proposed mechanism analytically and by means of simulations, and detail the way in which it could be protected against misuse}, keywords = {ad-hoc networks, cooperation, self-organization}, issn = {1383-469X}, doi = {10.1023/A:1025146013151}, @@ -13586,8 +13504,7 @@ We further investigate this intriguing proposal. Specifically, we pages = {299{\textendash}314}, publisher = {ACM}, address = {New York, NY, USA}, - abstract = {Structured peer-to-peer overlay networks provide a substrate for the construction of large-scale, decentralized applications, including distributed storage, group communication, and content distribution. These overlays are highly resilient; they can route messages correctly even when a large fraction of the nodes crash or the network partitions. But current overlays are not secure; even a small fraction of malicious nodes can prevent correct message delivery throughout the overlay. This problem is particularly serious in open peer-to-peer systems, where many diverse, autonomous parties without preexisting trust relationships wish to pool their resources. This paper studies attacks aimed at preventing correct message delivery in structured peer-to-peer overlays and presents defenses to these attacks. We describe and evaluate techniques that allow nodes to join the overlay, to maintain routing state, and to forward messages securely in the presence of malicious nodes. 
This problem is particularly serious in open peer-to-peer systems, where many diverse, autonomous parties without preexisting trust relationships wish to pool their resources. This paper studies attacks aimed at preventing correct message delivery in structured peer-to-peer overlays and presents defenses to these attacks. We describe and evaluate techniques that allow nodes to join the overlay, to maintain routing state, and to forward messages securely in the presence of malicious nodes}, keywords = {P2P, resilient overlay network}, issn = {0163-5980}, doi = {10.1145/844128.844156}, @@ -13671,8 +13588,7 @@ We further investigate this intriguing proposal. Specifically, we pages = {449{\textendash}462}, publisher = {IEEE Press}, address = {Piscataway, NJ, USA}, - abstract = {Software merging is an essential aspect of the maintenance and evolution of large-scale software systems. This paper provides a comprehensive survey and analysis of available merge approaches. Over the years, a wide variety of different merge techniques has been proposed. While initial techniques were purely based on textual merging, more powerful approaches also take the syntax and semantics of the software into account. There is a tendency towards operation-based merging because of its increased expressiveness. Another tendency is to try to define merge techniques that are as general, accurate, scalable, and customizable as possible, so that they can be used in any phase in the software life-cycle and detect as many conflicts as possible. After comparing the possible merge techniques, we suggest a number of important open problems and future research directions. -}, + abstract = {Software merging is an essential aspect of the maintenance and evolution of large-scale software systems. This paper provides a comprehensive survey and analysis of available merge approaches. Over the years, a wide variety of different merge techniques has been proposed. 
While initial techniques were purely based on textual merging, more powerful approaches also take the syntax and semantics of the software into account. There is a tendency towards operation-based merging because of its increased expressiveness. Another tendency is to try to define merge techniques that are as general, accurate, scalable, and customizable as possible, so that they can be used in any phase in the software life-cycle and detect as many conflicts as possible. After comparing the possible merge techniques, we suggest a number of important open problems and future research directions}, keywords = {conflict detection, large-scale software development, merge conflicts, software merging}, issn = {0098-5589}, doi = {10.1109/TSE.2002.1000449}, @@ -13722,7 +13638,7 @@ We further investigate this intriguing proposal. Specifically, we pages = {375{\textendash}408}, publisher = {ACM}, address = {New York, NY, USA}, - abstract = {This survey covers rollback-recovery techniques that do not require special language constructs. In the first part of the survey we classify rollback-recovery protocols into checkpoint-based and log-based. Checkpoint-based protocols rely solely on checkpointing for system state restoration. Checkpointing can be coordinated, uncoordinated, or communication-induced. Log-based protocols combine checkpointing with logging of nondeterministic events, encoded in tuples called determinants. Depending on how determinants are logged, log-based protocols can be pessimistic, optimistic, or causal. Throughout the survey, we highlight the research issues that are at the core of rollback-recovery and present the solutions that currently address them. We also compare the performance of different rollback-recovery protocols with respect to a series of desirable properties and discuss the issues that arise in the practical implementations of these protocols. 
}, + abstract = {This survey covers rollback-recovery techniques that do not require special language constructs. In the first part of the survey we classify rollback-recovery protocols into checkpoint-based and log-based. Checkpoint-based protocols rely solely on checkpointing for system state restoration. Checkpointing can be coordinated, uncoordinated, or communication-induced. Log-based protocols combine checkpointing with logging of nondeterministic events, encoded in tuples called determinants. Depending on how determinants are logged, log-based protocols can be pessimistic, optimistic, or causal. Throughout the survey, we highlight the research issues that are at the core of rollback-recovery and present the solutions that currently address them. We also compare the performance of different rollback-recovery protocols with respect to a series of desirable properties and discuss the issues that arise in the practical implementations of these protocols }, keywords = {message logging, rollback-recovery}, issn = {0360-0300}, doi = {10.1145/568522.568525}, @@ -13938,8 +13854,7 @@ We further investigate this intriguing proposal. Specifically, we publisher = {Springer-Verlag, LNCS 1962}, organization = {Springer-Verlag, LNCS 1962}, abstract = {Collecting accurate profile information and protecting an individual{\textquoteright}s privacy are ordinarily viewed as being at odds. This paper presents mechanisms that protect individual privacy while presenting accurate-indeed authenticated-profile information to servers and merchants. In particular, we give a pseudonym registration scheme and system that enforces unique user registration while separating trust required of registrars, issuers, and validators. This scheme enables the issuance of global unique pseudonyms (GUPs) and attributes enabling practical applications such as authentication of accurate attributes and enforcement of {\textquotedblleft}one-to-a-customer{\textquotedblright} properties. 
-We also present a scheme resilient to even pseudonymous profiling yet preserving the ability of merchants to authenticate the accuracy of information. It is the first mechanism of which the authors are aware to guarantee recent validity for group signatures, and more generally multi-group signatures, thus effectively enabling revocation of all or some of the multi-group certificates held by a principal. -}, +We also present a scheme resilient to even pseudonymous profiling yet preserving the ability of merchants to authenticate the accuracy of information. It is the first mechanism of which the authors are aware to guarantee recent validity for group signatures, and more generally multi-group signatures, thus effectively enabling revocation of all or some of the multi-group certificates held by a principal}, keywords = {privacy, pseudonym}, isbn = {978-3-540-42700-1}, doi = {10.1007/3-540-45472-1}, @@ -13983,7 +13898,7 @@ We also present a scheme resilient to even pseudonymous profiling yet preserving volume = {16}, year = {2001}, pages = {2003}, - abstract = {Applies graph theory to anonymity. The paper suffers from the fundamental problem that it does not discuss attacks on the scheme, and there are a couple of pretty basic ways to break anonymity. Also, the scheme uses lots of traffic; some variants end up looking much like a pipenet. }, + abstract = {Applies graph theory to anonymity. The paper suffers from the fundamental problem that it does not discuss attacks on the scheme, and there are a couple of pretty basic ways to break anonymity. 
Also, the scheme uses lots of traffic; some variants end up looking much like a pipenet}, url = {http://gecko.cs.purdue.edu/gnet/papers/BD.pdf }, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/BD.pdf}, author = {Amos Beimel and Shlomi Dolev} } @@ -14047,8 +13962,7 @@ We also present a scheme resilient to even pseudonymous profiling yet preserving abstract = { Recent advances in hardware and communication technologies have made possible and cost e ective to share a file system among several machines over a local (but possibly also a wide) area network. One of the most successful and widely used such applications is Sun{\textquoteright}s Network File System (NFS). NFS is very simple in structure but assumes a very strong trust model: the user trusts the remote le system server (which might be running on a machine in di erent country) and a network with his/her data. It is easy to see that neither assumption is a very realistic one. The server (or anybody with superuser privileges) might very well read the data on its local lesytem and it is well known that the Internet or any local area network (e.g, Ethernet) is very easy to tap (see for example, Berkeley{\textquoteright}s tcpdump 7, 5] application program). Impersoni cation of users is also another security drawback of NFS. In fact, most of the permission checking over NFS are performed in the kernel of the client. In such a context a pirate can temporarely assign to his own workstation the Internet address of victim. Without secure RPC 9] no further authentication procedure is requested. From here on, the pirate can issue NFS requests presenting himself with any (false) uid and therefore accessing for reading and writing any private data on the server, even protected data. Given the above, a user seeking a certain level of security should take some measures. Possible solutions are to use either user-level cryptography or application level cryptography. 
A discussion of the drawbacks of these approaches is found in 4]. A better approach is to push encryption services into the operating system as done by M. Blaze in the design of his CFS 4]. -In this paper, we propose a new cryptographic le system, which we call TCFS , as a suitable solution to the problem of privacy for distributed le system (see section 2.1). Our work improves on CFS by providing a deeper integration between the encryption service and the le system which results in a complete transparency of use to the user applications. -}, +In this paper, we propose a new cryptographic le system, which we call TCFS , as a suitable solution to the problem of privacy for distributed le system (see section 2.1). Our work improves on CFS by providing a deeper integration between the encryption service and the le system which results in a complete transparency of use to the user applications}, keywords = {crytographic file system, UNIX}, isbn = {1-880446-10-3}, url = {http://dl.acm.org/citation.cfm?id=647054.715628}, @@ -14145,7 +14059,7 @@ We then show how these building blocks can be used for applying the scheme to ef title = {The Gnutella Protocol Specification v0.4}, author = {TODO}, year = {2001}, - abstract = {A brief description of the gnutella protocol. }, + abstract = {A brief description of the gnutella protocol}, url = {http://www9.limewire.com/developer/gnutella_protocol_0.4.pdf} } @conference {Cabrera01herald:achieving, @@ -14346,7 +14260,7 @@ This compilation represents the collected wisdom of today{\textquoteright}s peer year = {2001}, publisher = {O{\textquoteright}Reilly Media}, organization = {O{\textquoteright}Reilly Media}, - abstract = {Description of the problems that arise when one tries to combine anonymity and accountability. Note that the Free Haven design described here charges for storing data in the network (downloads are free), whereas in GNUnet adding data is free and only the downloads are considered as utilization. 
}, + abstract = {Description of the problems that arise when one tries to combine anonymity and accountability. Note that the Free Haven design described here charges for storing data in the network (downloads are free), whereas in GNUnet adding data is free and only the downloads are considered as utilization}, author = {Roger Dingledine and Michael J. Freedman and David Molnar}, editor = {Andy oram} } @@ -14785,7 +14699,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p month = {December}, publisher = {Zero Knowledge Systems, {Inc.}}, type = {White Paper}, - abstract = {This white paper, targeted at the technically savvy reader, offers a detailed look at the Freedom 2.0 System architecture. It is intended to give the reader a good understanding of the components that make up this system and the relationships between them, as well as to encourage analysis of the system.} + abstract = {This white paper, targeted at the technically savvy reader, offers a detailed look at the Freedom 2.0 System architecture. It is intended to give the reader a good understanding of the components that make up this system and the relationships between them, as well as to encourage analysis of the system}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/freedom2-arch.pdf}, author = {Philippe Boucher and Adam Shostack and Ian Goldberg} } @@ -14796,7 +14710,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p month = jul, pages = {46{\textendash}66}, address = {Berkeley, CA, USA}, - abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. 
No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and di$\#$cult for a node operator to determine or be held responsible for the actual physical contents of her own node.} + abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and di$\#$cult for a node operator to determine or be held responsible for the actual physical contents of her own node}, url = {http://www.ecse.rpi.edu/Homepages/shivkuma/teaching/sp2001/readings/freenet.pdf}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/freenet.pdf}, author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong} @@ -14988,7 +14902,7 @@ Results based on simulations confirm that Overcast provides its added functional booktitle = {In IEEE GLOBECOM}, year = {2000}, pages = {1707{\textendash}1711}, - abstract = {We present an architecture that enables the sharing of information among mobile, wireless, collaborating hosts that experience intermittent connectivity to the Internet. 
Participants in the system obtain data objects from Internet-connected servers, cache them and exchange them with others who are interested in them. The system exploits the fact that there is a high locality of information access within a geographic area. It aims to increase the data availability to participants with lost connectivity to the Internet. We discuss the main components of the system and possible applications. Finally, we present simulation results that show that the ad hoc networks can be very e$\#$ective in distributing popular information. 1 Introduction In a few years, a large percentage of the population in metropolitan areas will be equipped with PDAs, laptops or cell phones with built-in web browsers. Thus, access to information and entertainment will become as important as voice communications.}, + abstract = {We present an architecture that enables the sharing of information among mobile, wireless, collaborating hosts that experience intermittent connectivity to the Internet. Participants in the system obtain data objects from Internet-connected servers, cache them and exchange them with others who are interested in them. The system exploits the fact that there is a high locality of information access within a geographic area. It aims to increase the data availability to participants with lost connectivity to the Internet. We discuss the main components of the system and possible applications. Finally, we present simulation results that show that the ad hoc networks can be very e$\#$ective in distributing popular information. 1 Introduction In a few years, a large percentage of the population in metropolitan areas will be equipped with PDAs, laptops or cell phones with built-in web browsers. 
Thus, access to information and entertainment will become as important as voice communications}, keywords = {802.11, file-sharing}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.36.5640}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/globecom00.pdf}, @@ -15123,7 +15037,7 @@ Results based on simulations confirm that Overcast provides its added functional publisher = {ACM}, organization = {ACM}, address = {Atlanta, Georgia, USA}, - abstract = {We consider algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents {\textquoteright} interests are best served by behaving correctly. Following notions from the field of mechanism design, we suggest a framework for studying such algorithms. Our main technical contribution concerns the study of a representative task scheduling problem for which the standard mechanism design tools do not suffice. }, + abstract = {We consider algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents {\textquoteright} interests are best served by behaving correctly. Following notions from the field of mechanism design, we suggest a framework for studying such algorithms. Our main technical contribution concerns the study of a representative task scheduling problem for which the standard mechanism design tools do not suffice}, keywords = {algorithms, mechanis design}, isbn = {1-58113-067-8}, doi = {http://doi.acm.org/10.1145/301250.301287}, @@ -15500,7 +15414,7 @@ This exposition presents a model to formally study such algorithms. 
This model, publisher = {ACM Press}, organization = {ACM Press}, address = {El Paso, TX, United States}, - abstract = {Private information retrieval (PIR) schemes enable a user to access k replicated copies of a database (k 2), and privately retrieve one of the n bits of data stored in the databases. This means that the queries give each individual database no partial information (in the information theoretic sense) on the identity of the item retrieved by the user. Today, the best two database scheme (k = 2) has communication complexity O(n 1=3 ), while for any constant number, k, the best k database scheme has communication complexity O(n 1=(2k\Gamma1) ). The motivation for the present work is the question whether this complexity can be reduced if one is willing to achieve computational privacy, rather than information theoretic privacy. (This means that privacy is guaranteed only with respect to databases that are restricted to polynomial time computations.) We answer this question affirmatively, and Computer Science Dept., Technion, Haifa, Israel. }, + abstract = {Private information retrieval (PIR) schemes enable a user to access k replicated copies of a database (k 2), and privately retrieve one of the n bits of data stored in the databases. This means that the queries give each individual database no partial information (in the information theoretic sense) on the identity of the item retrieved by the user. Today, the best two database scheme (k = 2) has communication complexity O(n 1=3 ), while for any constant number, k, the best k database scheme has communication complexity O(n 1=(2k\Gamma1) ). The motivation for the present work is the question whether this complexity can be reduced if one is willing to achieve computational privacy, rather than information theoretic privacy. (This means that privacy is guaranteed only with respect to databases that are restricted to polynomial time computations.) 
We answer this question affirmatively, and Computer Science Dept., Technion, Haifa, Israel}, keywords = {communication complexity, private information retrieval}, isbn = {0-89791-888-6}, doi = {http://doi.acm.org/10.1145/258533.258609}, @@ -15735,7 +15649,7 @@ in the communication chain. This implies that neither the respondent nor his pro title = {The final frontier: Embedding networked sensors in the soil}, year = {1995}, publisher = {Lecture Notes in Computer Science}, - abstract = {This paper presents the first systematic design of a robust sensing system suited for the challenges presented by soil environments. We describe three soil deployments we have undertaken: in Bangladesh, and in California at the James Reserve and in the San Joaquin River basin. We discuss our experiences and lessons learned in deploying soil sensors. We present data from each deployment and evaluate our techniques for improving the information yield from these systems. Our most notable results include the following: in-situ calibration techniques to postpone labor-intensive and soil disruptive calibration events developed at the James Reserve; achieving a 91 \% network yield from a Mica2 wireless sensing system without end-to-end reliability in Bangladesh; and the javelin, a new platform that facilitates the deployment, replacement and in-situ calibration of soil sensors, deployed in the San Joaquin River basin. Our techniques to increase information yield have already led to scientifically promising results, including previously unexpected diurnal cycles in various soil chemistry parameters across several deployments. }, + abstract = {This paper presents the first systematic design of a robust sensing system suited for the challenges presented by soil environments. We describe three soil deployments we have undertaken: in Bangladesh, and in California at the James Reserve and in the San Joaquin River basin. We discuss our experiences and lessons learned in deploying soil sensors. 
We present data from each deployment and evaluate our techniques for improving the information yield from these systems. Our most notable results include the following: in-situ calibration techniques to postpone labor-intensive and soil disruptive calibration events developed at the James Reserve; achieving a 91 \% network yield from a Mica2 wireless sensing system without end-to-end reliability in Bangladesh; and the javelin, a new platform that facilitates the deployment, replacement and in-situ calibration of soil sensors, deployed in the San Joaquin River basin. Our techniques to increase information yield have already led to scientifically promising results, including previously unexpected diurnal cycles in various soil chemistry parameters across several deployments}, keywords = {sensor networks, wireless sensor network}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.7766}, www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.120.7766.pdf}, @@ -16057,7 +15971,7 @@ We also sketch applications of these signatures to a payment system, solving dis pages = {149{\textendash}154}, publisher = {ACM}, address = {New York, NY, USA}, - abstract = {This paper describes a technique for implementing the sort of small databases that frequently occur in the design of operating systems and distributed systems. We take advantage of the existence of very large virtual memories, and quite large real memories, to make the technique feasible. We maintain the database as a strongly typed data structure in virtual memory, record updates incrementally on disk in a log and occasionally make a checkpoint of the entire database. We recover from crashes by restoring the database from an old checkpoint then replaying the log. We use existing packages to convert between strongly typed data objects and their disk representations, and to communicate strongly typed data across the network (using remote procedure calls). 
Our memory is managed entirely by a general purpose allocator and garbage collector. This scheme has been used to implement a name server for a distributed system. The resulting implementation has the desirable property of being simultaneously simple, efficient and reliable. }, + abstract = {This paper describes a technique for implementing the sort of small databases that frequently occur in the design of operating systems and distributed systems. We take advantage of the existence of very large virtual memories, and quite large real memories, to make the technique feasible. We maintain the database as a strongly typed data structure in virtual memory, record updates incrementally on disk in a log and occasionally make a checkpoint of the entire database. We recover from crashes by restoring the database from an old checkpoint then replaying the log. We use existing packages to convert between strongly typed data objects and their disk representations, and to communicate strongly typed data across the network (using remote procedure calls). Our memory is managed entirely by a general purpose allocator and garbage collector. This scheme has been used to implement a name server for a distributed system. The resulting implementation has the desirable property of being simultaneously simple, efficient and reliable}, issn = {0163-5980}, doi = {10.1145/37499.37517}, url = {http://portal.acm.org/citation.cfm?id=37499.37517$\#$}, @@ -16319,8 +16233,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms year = {1976}, month = nov, pages = {644-654}, - abstract = {Two kinds of contemporary developments in cryptography are examined. Widening applications of teleprocessing have given rise to a need for new types of cryptographic systems, which minimize the need for secure key distribution channels and supply the equivalent of a written signature. This paper suggests ways to solve these currently open problems. 
It also discusses how the theories of communication and computation are beginning to provide the tools to solve cryptographic problems of long standing. -}, + abstract = {Two kinds of contemporary developments in cryptography are examined. Widening applications of teleprocessing have given rise to a need for new types of cryptographic systems, which minimize the need for secure key distribution channels and supply the equivalent of a written signature. This paper suggests ways to solve these currently open problems. It also discusses how the theories of communication and computation are beginning to provide the tools to solve cryptographic problems of long standing}, keywords = {cryptographic systems, cryptography}, issn = {0018-9448}, doi = {10.1109/TIT.1976.1055638},