commit 190ee05d1c8fa893d2e9705b0bbcb4f01a92bcc9
parent 9c0f8a68f70c606dab9fd4506f2282ac2b5256f3
Author: Nils Gillmann <ng0@n0.is>
Date: Sun, 7 Oct 2018 23:26:26 +0000
Remove ^M
Signed-off-by: Nils Gillmann <ng0@n0.is>
Diffstat:
| M | gnunetbib.bib | | | 1097 | +++++++++++++++++++++++++++++++++++++++---------------------------------------- |
1 file changed, 542 insertions(+), 555 deletions(-)
diff --git a/gnunetbib.bib b/gnunetbib.bib
@@ -267,7 +267,7 @@
pages = {63--82},
publisher = {Springer International Publishing},
organization = {Springer International Publishing},
- abstract = {The goal of Private Information Retrieval (PIR) is the ability to query a database successfully without the operator of the database server discovering which record(s) of the database the querier is interested in. There are two main classes of PIR protocols: those that provide privacy guarantees based on the computational limitations of servers (CPIR) and those that rely on multiple servers not colluding for privacy (IT-PIR). These two classes have different advantages and disadvantages that make them more or less attractive to designers of PIR-enabled privacy enhancing technologies.
+ abstract = {The goal of Private Information Retrieval (PIR) is the ability to query a database successfully without the operator of the database server discovering which record(s) of the database the querier is interested in. There are two main classes of PIR protocols: those that provide privacy guarantees based on the computational limitations of servers (CPIR) and those that rely on multiple servers not colluding for privacy (IT-PIR). These two classes have different advantages and disadvantages that make them more or less attractive to designers of PIR-enabled privacy enhancing technologies.
We present a hybrid PIR protocol that combines two PIR protocols, one from each of these classes. Our protocol inherits many positive aspects of both classes and mitigates some of the negative aspects. For example, our hybrid protocol maintains partial privacy when the security assumptions of one of the component protocols are broken, mitigating the privacy loss in such an event. We have implemented our protocol as an extension of the Percy++ library so that it combines a PIR protocol by Aguilar Melchor and Gaborit with one by Goldberg. We show that our hybrid protocol uses less communication than either of these component protocols and that our scheme is particularly beneficial when the number of records in a database is large compared to the size of the records. This situation arises in applications such as TLS certificate verification, anonymous communications systems, private LDAP lookups, and others},
isbn = {978-3-319-08505-0},
doi = {10.1007/978-3-319-08506-7_4},
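The IT-PIR class mentioned in this abstract is easy to picture with the classic two-server XOR scheme, where privacy holds as long as the two servers do not collude. A minimal Python sketch under an assumed database layout and block size, not the Percy++ API:

import os

def xor_blocks(a, b):
    return bytes(x ^ y for x, y in zip(a, b))

def server_answer(db, subset):
    # Each server XORs together the blocks whose indices are in its query subset.
    ans = bytes(len(db[0]))
    for i in subset:
        ans = xor_blocks(ans, db[i])
    return ans

def retrieve(db, i):
    n = len(db)
    s1 = {j for j in range(n) if os.urandom(1)[0] & 1}  # random subset for server 1
    s2 = s1 ^ {i}                # symmetric difference flips membership of record i
    a1 = server_answer(db, s1)   # in reality these two run on separate servers
    a2 = server_answer(db, s2)
    return xor_blocks(a1, a2)    # every block except record i cancels out

db = [bytes([k]) * 8 for k in range(16)]
assert retrieve(db, 5) == db[5]

Neither server learns i, since each sees only a uniformly random subset.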
@@ -381,7 +381,7 @@ We present a hybrid PIR protocol that combines two PIR protocols, one from each
pages = {204--223},
publisher = {Springer International Publishing},
organization = {Springer International Publishing},
- abstract = {Anonymous communication systems ensure that correspondence between senders and receivers cannot be inferred with certainty. However, when patterns are persistent, observations from anonymous
+ abstract = {Anonymous communication systems ensure that correspondence between senders and receivers cannot be inferred with certainty. However, when patterns are persistent, observations from anonymous
communication systems enable the reconstruction of user behavioral profiles. Protection against profiling can be enhanced by adding dummy messages, generated by users or by the anonymity provider, to the communication. In this paper we study the limits of the protection provided by this countermeasure. We propose an analysis methodology based on solving a least squares problem that permits characterizing the adversary{\textquoteright}s profiling error with respect to the user behavior, the anonymity provider behavior, and the dummy strategy. Focusing on the particular case of a timed pool mix we show how, given a privacy target, the performance analysis can be used to design optimal dummy strategies to protect this objective},
keywords = {anonymous communications, disclosure attacks, dummies},
isbn = {978-3-319-08505-0},
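The least-squares methodology this abstract refers to boils down to solving an overdetermined linear system: observed receiver counts are approximately the sender activity matrix times the unknown profiles, with dummy traffic acting as noise. A toy Python sketch with assumed dimensions and noise model:

import numpy as np

rng = np.random.default_rng(1)
senders, receivers, rounds = 3, 4, 500

# Unknown ground truth: row s = probability that sender s picks each receiver.
profiles = rng.dirichlet(np.ones(receivers), size=senders)

A = rng.integers(0, 5, size=(rounds, senders))       # messages sent per round (observed)
Y = A @ profiles + rng.normal(0, 0.5, (rounds, receivers))  # received counts; the noise
                                                            # term stands in for dummies

# Adversary's estimate: least-squares solution of A X ~= Y.
est, *_ = np.linalg.lstsq(A, Y, rcond=None)
print(np.abs(est - profiles).max())  # profiling error shrinks as rounds accumulate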
@@ -397,10 +397,10 @@ communication systems enable the reconstruction of user behavioral profiles. Pro
year = {2014},
month = may,
type = {Technical Report},
- abstract = {The recent NSA revelations have shown that {\textquotedblleft}address book{\textquotedblright} and {\textquotedblleft}buddy list{\textquotedblright} information are routinely targeted for mass interception. As a response to this threat, we present DP5, a cryptographic
-service that provides privacy-friendly indication of presence to support real-time communications. DP5 allows clients to register and query the online presence of their list of friends while keeping this
-list secret. Besides presence, high-integrity status updates are supported, to facilitate key update and rendezvous protocols. While infrastructure services are required for DP5 to operate, they are
-designed to not require any long-term secrets and provide perfect forward secrecy in case of compromise. We provide security arguments for the indistinguishability properties of the protocol, as well
+ abstract = {The recent NSA revelations have shown that {\textquotedblleft}address book{\textquotedblright} and {\textquotedblleft}buddy list{\textquotedblright} information are routinely targeted for mass interception. As a response to this threat, we present DP5, a cryptographic
+service that provides privacy-friendly indication of presence to support real-time communications. DP5 allows clients to register and query the online presence of their list of friends while keeping this
+list secret. Besides presence, high-integrity status updates are supported, to facilitate key update and rendezvous protocols. While infrastructure services are required for DP5 to operate, they are
+designed to not require any long-term secrets and provide perfect forward secrecy in case of compromise. We provide security arguments for the indistinguishability properties of the protocol, as well
as an evaluation of its performance},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/DP5\%3A\%20A\%20Private\%20Presence\%20Service.pdf},
www_section = {https://bibliography.gnunet.org},
@@ -447,10 +447,10 @@ as an evaluation of its performance},
pages = {123--142},
publisher = {Springer International Publishing},
organization = {Springer International Publishing},
- abstract = {Distributed encryption is a cryptographic primitive that implements revocable privacy. The primitive allows a recipient of a message to decrypt it only
-if enough senders encrypted that same message. We present a new distributed encryption scheme that is simpler than the previous solution by Hoepman and
-Galindo{\textemdash}in particular it does not rely on pairings{\textemdash}and that satisfies stronger security requirements. Moreover, we show how to achieve key evolution, which is necessary to ensure scalability in many practical applications, and prove that the
-resulting scheme is forward secure. Finally, we present a provably secure batched
+ abstract = {Distributed encryption is a cryptographic primitive that implements revocable privacy. The primitive allows a recipient of a message to decrypt it only
+if enough senders encrypted that same message. We present a new distributed encryption scheme that is simpler than the previous solution by Hoepman and
+Galindo{\textemdash}in particular it does not rely on pairings{\textemdash}and that satisfies stronger security requirements. Moreover, we show how to achieve key evolution, which is necessary to ensure scalability in many practical applications, and prove that the
+resulting scheme is forward secure. Finally, we present a provably secure batched
distributed encryption scheme that is much more efficient for small plaintext domains, but that requires more storage},
isbn = {978-3-319-08505-0},
doi = {10.1007/978-3-319-08506-7_7},
@@ -465,10 +465,10 @@ distributed encryption scheme that is much more efficient for small plaintext do
year = {2014},
month = aug,
type = {Master{\textquoteright}s},
- abstract = {Port scanning is used to discover vulnerable services and launch attacks against network infrastructure. Port knocking is a well-known technique to hide TCP servers from port scanners. This thesis presents the design of TCP Stealth, a socket option to realize a new port knocking variant with improved security and usability compared to previous designs.
-
TCP Stealth replaces the traditional random TCP sequence number (SQN) with a token that authenticates the client and (optionally) the first bytes of the TCP payload. Clients and servers can enable TCP Stealth by explicitly setting a socket option or linking against a library that wraps existing network system calls.
-
+ abstract = {Port scanning is used to discover vulnerable services and launch attacks against network infrastructure. Port knocking is a well-known technique to hide TCP servers from port scanners. This thesis presents the design of TCP Stealth, a socket option to realize a new port knocking variant with improved security and usability compared to previous designs.
+
+TCP Stealth replaces the traditional random TCP sequence number (SQN) with a token that authenticates the client and (optionally) the first bytes of the TCP payload. Clients and servers can enable TCP Stealth by explicitly setting a socket option or linking against a library that wraps existing network system calls.
+
This thesis also describes Knock, a free software implementation of TCP Stealth for the Linux kernel and {\tt libknockify}, a shared library that wraps network system calls to activate Knock on GNU/Linux systems, allowing administrators to deploy Knock without recompilation. Finally, we present experimental results demonstrating that TCP Stealth is compatible with most existing middleboxes on the Internet},
keywords = {GNUnet, Hacienda, Knock, TCP Stealth},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ma_kirsch_2014_0.pdf},
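The token construction described in this abstract can be pictured as hashing a pre-shared secret and connection identifiers down to 32 bits; the derivation below is a hypothetical illustration, not Knock's actual scheme:

import hashlib
import struct

def stealth_isn(secret: bytes, src_ip: str, dst_ip: str, dst_port: int,
                payload_prefix: bytes = b"") -> int:
    # Hash the shared secret and connection identifiers down to a 32-bit
    # value usable in place of a random TCP initial sequence number.
    h = hashlib.sha256()
    h.update(secret)
    h.update(src_ip.encode() + dst_ip.encode())
    h.update(struct.pack("!H", dst_port))
    h.update(payload_prefix)          # optionally bind the first payload bytes
    return struct.unpack("!I", h.digest()[:4])[0]

# The server recomputes the token and silently drops SYNs whose ISN does not match,
# so the service stays invisible to port scanners lacking the secret.
isn = stealth_isn(b"shared-secret", "192.0.2.1", "198.51.100.2", 22)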
@@ -497,15 +497,15 @@ This thesis also describes Knock, a free software implementation of TCP Stealth
school = {Technische Universitaet Muenchen},
type = {Masters},
address = {Garching bei Muenchen},
- abstract = {The successful operation of a peer-to-peer network depends on the resilience of its peers{\textquoteright}
-communications. On the Internet, direct connections between peers are often limited by restrictions like NATs and traffic filtering. Addressing such problems is particularly pressing for peer-to-peer networks that do not wish to rely on any trusted infrastructure, which might otherwise help the participants establish communication channels. Modern peer-to-peer networks employ various techniques to address the problem of restricted connectivity on the Internet. One interesting development is that various overlay networks now support multiple communication protocols to improve resilience and counteract service degradation.
-
-The support of multiple protocols causes a number of new challenges. A peer should evaluate which protocols fulfill the communication requirements best. Furthermore, limited resources, such as bandwidth, should be distributed among peers and protocols to match application requirements. Existing approaches to this problem of transport selection and resource allocation are rigid: they calculate the solution only from the current state of the
-environment, and do not adapt their strategy based on failures or successes of previous
-allocations.
-
-This thesis explores the feasibility of using machine learning to improve the quality of the transport selection and resource allocation over current approaches. The goal is to improve the solution process by learning selection and allocation strategies from the experience gathered in the course of many iterations of the algorithm. We compare the different approaches in the field of machine learning with respect to their properties and suitability for the problem. Based on this evaluation and an in-depth analysis of the requirements of the underlying problem, the thesis presents a design for how reinforcement learning can be used and adapted to the given problem domain.
-
+ abstract = {The successful operation of a peer-to-peer network depends on the resilience of its peers{\textquoteright}
+communications. On the Internet, direct connections between peers are often limited by restrictions like NATs and traffic filtering. Addressing such problems is particularly pressing for peer-to-peer networks that do not wish to rely on any trusted infrastructure, which might otherwise help the participants establish communication channels. Modern peer-to-peer networks employ various techniques to address the problem of restricted connectivity on the Internet. One interesting development is that various overlay networks now support multiple communication protocols to improve resilience and counteract service degradation.
+
+The support of multiple protocols causes a number of new challenges. A peer should evaluate which protocols fulfill the communication requirements best. Furthermore, limited resources, such as bandwidth, should be distributed among peers and protocols to match application requirements. Existing approaches to this problem of transport selection and resource allocation are rigid: they calculate the solution only from the current state of the
+environment, and do not adapt their strategy based on failures or successes of previous
+allocations.
+
+This thesis explores the feasibility of using machine learning to improve the quality of the transport selection and resource allocation over current approaches. The goal is to improve the solution process by learning selection and allocation strategies from the experience gathered in the course of many iterations of the algorithm. We compare the different approaches in the field of machine learning with respect to their properties and suitability for the problem. Based on this evaluation and an in-depth analysis of the requirements of the underlying problem, the thesis presents a design for how reinforcement learning can be used and adapted to the given problem domain.
+
The design is evaluated with the help of simulation and a realistic implementation in the GNUnet Peer-to-Peer framework. Our experimental results highlight some of the implications of the multitude of implementation choices, key challenges, and possible directions for the use of reinforcement learning in this domain},
keywords = {bandwidth allocation, GNUnet, machine learning},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/oehlmann2014machinelearning.pdf},
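The learning approach this abstract describes can be reduced, in spirit, to a bandit-style loop: keep a value estimate per transport, exploit the best one, and explore occasionally. A sketch with assumed transport names and reward signal:

import random

transports = ["tcp", "udp", "http", "wlan"]
q = {t: 0.0 for t in transports}   # learned value estimate per transport
n = {t: 0 for t in transports}     # times each transport was chosen
epsilon = 0.1                      # exploration rate

def select_transport():
    if random.random() < epsilon:
        return random.choice(transports)        # explore
    return max(transports, key=lambda t: q[t])  # exploit current best estimate

def update(t, reward):
    # Incremental average: adapt to failures/successes of previous allocations.
    n[t] += 1
    q[t] += (reward - q[t]) / n[t]

t = select_transport()
update(t, reward=0.7)  # e.g. normalized goodput observed on the chosen transport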
@@ -521,9 +521,9 @@ The design is evaluated with the help of simulation and a realistic implementati
school = {Technische Universitaet Muenchen},
type = {Bachelor{\textquoteright}s},
address = {Garching bei Muenchen},
- abstract = {Linear programming (LP) has numerous applications in different fields. In some scenarios, e.g. supply chain master planning (SCMP), the goal is solving linear programs involving multiple parties reluctant to share their private information. In this case, methods from the area of secure multi-party computation (SMC) can be used. Secure multi-party versions of LP solvers have been known to be impractical due to high communication complexity. To overcome this, solutions based on problem transformation have been put forward.
-
-In this thesis, one such algorithm, proposed by Dreier and Kerschbaum, is discussed, implemented, and evaluated with respect to numerical stability and scalability. Results
+ abstract = {Linear programming (LP) has numerous applications in different fields. In some scenarios, e.g. supply chain master planning (SCMP), the goal is solving linear programs involving multiple parties reluctant to share their private information. In this case, methods from the area of secure multi-party computation (SMC) can be used. Secure multi-party versions of LP solvers have been known to be impractical due to high communication complexity. To overcome this, solutions based on problem transformation have been put forward.
+
+In this thesis, one such algorithm, proposed by Dreier and Kerschbaum, is discussed, implemented, and evaluated with respect to numerical stability and scalability. Results
obtained with different parameter sets and different test cases are presented and some problems are exposed. It was found that the algorithm has some unforeseen limitations, particularly when implemented within the bounds of normal primitive data types. Random numbers generated during the protocol have to be extremely small so as to not cause problems with overflows after a series of multiplications. The number of peers participating additionally limits the size of numbers. A positive finding was that results produced when none of the aforementioned problems occur are generally quite accurate. We discuss a few possibilities to overcome some of the problems with an implementation using arbitrary precision numbers},
keywords = {GNUnet, linear programming, secure multi-party computation},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/arias2014bs.pdf},
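The overflow limitation reported here follows directly from the multiplicative disguise used by transformation-based protocols: each participating peer multiplies coefficients by its own random factor, so values grow by roughly the factor's bit width per peer. A toy demonstration (bit widths assumed):

import random

random.seed(0)
peers = 8
entry = 10_000                        # one LP coefficient
factors = [random.randrange(2, 2**16) for _ in range(peers)]

disguised = entry
for f in factors:                     # each peer multiplies in its random factor
    disguised *= f                    # Python integers are arbitrary precision

print(disguised.bit_length())         # well past 64 bits for modest peer counts
print(disguised < 2**64)              # False: a uint64 implementation would wrap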
@@ -578,8 +578,8 @@ In this paper we explore the implications of differential privacy when the indis
publisher = {Springer Verlag},
organization = {Springer Verlag},
address = {La Rochelle, France},
- abstract = {A central problem on the Internet today is that key infrastructure for security is concentrated in a few places. This is particularly true in the areas of naming and public key infrastructure. Secret services and other government organizations can use this fact to block access to information or monitor communications. One of the most popular and easy to perform techniques is to make information on the Web inaccessible by censoring or manipulating the Domain Name System (DNS). With the introduction of DNSSEC, the DNS is furthermore poised to become an alternative PKI to the failing X.509 CA system, further cementing the power of those in charge of operating DNS.
-
+ abstract = {A central problem on the Internet today is that key infrastructure for security is concentrated in a few places. This is particularly true in the areas of naming and public key infrastructure. Secret services and other government organizations can use this fact to block access to information or monitor communications. One of the most popular and easy to perform techniques is to make information on the Web inaccessible by censoring or manipulating the Domain Name System (DNS). With the introduction of DNSSEC, the DNS is furthermore poised to become an alternative PKI to the failing X.509 CA system, further cementing the power of those in charge of operating DNS.
+
This paper maps the design space and gives design requirements for censorship resistant name systems. We survey the existing range of ideas for the realization of such a system and discuss the challenges these systems have to overcome in practice. Finally, we present the results from a survey on browser usage, which supports the idea that delegation should be a key ingredient in any censorship resistant name system},
keywords = {DNS, GNS, GNU Name System, GNUnet, PKI, SDSI, Zooko{\textquoteright}s Triangle},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/fps2013wachs.pdf},
@@ -603,12 +603,12 @@ This paper maps the design space and gives design requirements for censorship re
school = {Technische Universitaet Muenchen},
type = {Masters },
address = {Garching bei Muenchen},
- abstract = {Evaluations of P2P protocols during the system{\textquoteright}s design and implementation phases are commonly done through simulation and emulation, respectively. While the current state-of-the-art simulation allows evaluations with many millions of peers through the use of abstractions, emulation still lags behind as it involves executing the real implementation in some parts of the system. This difference in scales can make it hard to relate the evaluations created with simulation and emulation during the design and implementation phases and can result in a limited evaluation of the implementation, which may cause severe problems after deployment.
-
-In this thesis, we build upon an existing emulator for P2P applications to push the scales offered by emulation towards the limits set by simulation. Our approach distributes and co-ordinates the emulation across many hosts. Large deployments are possible by deploying hundreds or thousands of peers on each host.
-
-To address the varying needs of an experimenter and the range of available hardware, we make our approach scalable such that it can easily be adapted to run evaluations on a single machine or a large group of hosts. Specifically, the system automatically adjusts the number of overlapping operations to the available resources efficiently using a feedback mechanism, thus relieving the experimenter from the hassles of manual tuning.
-
+ abstract = {Evaluations of P2P protocols during the system{\textquoteright}s design and implementation phases are commonly done through simulation and emulation, respectively. While the current state-of-the-art simulation allows evaluations with many millions of peers through the use of abstractions, emulation still lags behind as it involves executing the real implementation in some parts of the system. This difference in scales can make it hard to relate the evaluations created with simulation and emulation during the design and implementation phases and can result in a limited evaluation of the implementation, which may cause severe problems after deployment.
+
+In this thesis, we build upon an existing emulator for P2P applications to push the scales offered by emulation towards the limits set by simulation. Our approach distributes and co-ordinates the emulation across many hosts. Large deployments are possible by deploying hundreds or thousands of peers on each host.
+
+To address the varying needs of an experimenter and the range of available hardware, we make our approach scalable such that it can easily be adapted to run evaluations on a single machine or a large group of hosts. Specifically, the system automatically adjusts the number of overlapping operations to the available resources efficiently using a feedback mechanism, thus relieving the experimenter from the hassles of manual tuning.
+
We specifically target HPC systems like compute clusters and supercomputers and demonstrate how such systems can be used for large scale emulations by evaluating two P2P applications with deployment sizes up to 90k peers on a supercomputer},
keywords = {emulation, GNUnet, large scale testing, protocol evaluation, testbed},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/thesis_lowres.pdf , https://gnunet.org/git/bibliography.git/tree/docs/thesis.pdf},
@@ -624,8 +624,8 @@ We specifically target HPC systems like compute clusters and supercomputers and
school = {Technische Universit{\"a}t M{\"u}nchen},
type = {Bachelor Thesis},
address = {Munich},
- abstract = {Automatic crash handlers support software developers in finding bugs and fixing the problems in their code. Most of them behave similarly in providing the developer with a (symbolic) stack trace and a memory dump of the crashed application. This introduces some problems that we try to fix with our proposed automatic bug reporting system called "Monkey".
-
+ abstract = {Automatic crash handlers support software developers in finding bugs and fixing the problems in their code. Most of them behave similarly in providing the developer with a (symbolic) stack trace and a memory dump of the crashed application. This introduces some problems that we try to fix with our proposed automatic bug reporting system called "Monkey".
+
In this paper we describe the problems that occur when debugging widely distributed systems and how Monkey handles them. First, we describe our motivation for developing the Monkey system. Afterwards we present the most common existing automatic crash handlers and how they work. Thirdly, you will get an overview of the Monkey system and its components. In the fourth chapter we will analyze one report generated by Monkey, evaluate an online experiment we conducted and present some of our findings during the development of the clustering algorithm used to categorize crash reports. Last, we discuss some of Monkey{\textquoteright}s features and compare them to the existing approaches. Also some ideas for the future development of the Monkey system are presented before we conclude that Monkey{\textquoteright}s approach is promising, but some work is still left to establish Monkey in the open source community},
keywords = {automatic, clustering, debugging, GDB, GNUnet, report, Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/main_0.pdf},
@@ -676,12 +676,12 @@ In this paper we describe the problems that occur when debugging widely distribu
school = {Technische Universitaet Muenchen},
type = {Master{\textquoteright}s},
address = {Garching bei Muenchen},
- abstract = {SPDY is a rather new protocol which is an alternative to HTTP. It was designed to address inefficiencies in the latter and thereby improve latency and reduce bandwidth consumption.
-
-This thesis presents the design and implementation of a setup for utilizing SPDY within the anonymizing Tor network for reducing latency and traffic in the latter. A C library implementing the SPDY server protocol is introduced together with an HTTP to SPDY and a SPDY to HTTP proxy, which form the basis of the presented design.
-
-Furthermore, we focus on the SPDY server push feature which allows servers to send multiple responses to a single request for reducing latency and traffic on loading web pages. We propose a prediction algorithm for employing push at SPDY servers and proxies. The algorithm makes predictions based on previous requests and responses and initially does not know anything about the data which it will push.
-
+ abstract = {SPDY is a rather new protocol which is an alternative to HTTP. It was designed to address inefficiencies in the latter and thereby improve latency and reduce bandwidth consumption.
+
+This thesis presents the design and implementation of a setup for utilizing SPDY within the anonymizing Tor network for reducing latency and traffic in the latter. A C library implementing the SPDY server protocol is introduced together with an HTTP to SPDY and a SPDY to HTTP proxy, which form the basis of the presented design.
+
+Furthermore, we focus on the SPDY server push feature which allows servers to send multiple responses to a single request for reducing latency and traffic on loading web pages. We propose a prediction algorithm for employing push at SPDY servers and proxies. The algorithm makes predictions based on previous requests and responses and initially does not know anything about the data which it will push.
+
This thesis includes extensive measurement data highlighting the possible benefits of using SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks experiencing latency or loss. Moreover, the real profit from using SPDY within the Tor network on loading some of the most popular web sites is presented. Finally, evaluations of the proposed push prediction algorithm are given for emphasizing the possible gain of employing it at SPDY reverse and forward proxies},
keywords = {anonymity, HTTP, privacy, spdy, Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/uzunov2013torspdy.pdf},
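A push predictor of the general kind sketched in this abstract can be as simple as remembering which subresources followed each main request and pushing the frequent ones next time; the thresholds and data structures below are assumptions, not the thesis's algorithm:

from collections import defaultdict

history = defaultdict(lambda: defaultdict(int))  # main URL -> subresource -> count
seen = defaultdict(int)                          # main URL -> number of visits

def observe(main_url, subresources):
    # Record which subresources the client fetched after a main request.
    seen[main_url] += 1
    for sub in subresources:
        history[main_url][sub] += 1

def predict_push(main_url, threshold=0.8):
    # Push subresources that followed this URL in most previous visits;
    # initially nothing is known, so nothing is pushed.
    total = seen[main_url]
    if total == 0:
        return []
    return [s for s, c in history[main_url].items() if c / total >= threshold]

observe("/index.html", ["/style.css", "/logo.png"])
observe("/index.html", ["/style.css"])
print(predict_push("/index.html"))   # ['/style.css']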
@@ -761,8 +761,7 @@ This thesis includes extensive measurement data highlighting the possible benefi
year = {2012},
month = feb,
address = {Bonaire},
- abstract = {Tor, an anonymity network formed by volunteer nodes, uses the estimated bandwidth of the nodes as a central feature of its path selection algorithm. The current load on nodes is not considered in this algorithm, however, and we ob-
-serve that some nodes persist in being under-utilized or congested. This can degrade the network{\textquoteright}s performance, discourage Tor adoption, and consequently reduce the size of Tor{\textquoteright}s anonymity set. In an effort to reduce congestion and improve load balancing, we propose a congestion-aware path selection algorithm. Using latency as an indicator of congestion, clients use opportunistic and lightweight active measurements to evaluate the congestion state of nodes, and reject nodes that appear congested. Through experiments conducted on the live Tor network, we verify our hypothesis that clients can infer congestion using latency and show that congestion-aware path selection can improve performance},
+ abstract = {Tor, an anonymity network formed by volunteer nodes, uses the estimated bandwidth of the nodes as a central feature of its path selection algorithm. The current load on nodes is not considered in this algorithm, however, and we observe that some nodes persist in being under-utilized or congested. This can degrade the network{\textquoteright}s performance, discourage Tor adoption, and consequently reduce the size of Tor{\textquoteright}s anonymity set. In an effort to reduce congestion and improve load balancing, we propose a congestion-aware path selection algorithm. Using latency as an indicator of congestion, clients use opportunistic and lightweight active measurements to evaluate the congestion state of nodes, and reject nodes that appear congested. Through experiments conducted on the live Tor network, we verify our hypothesis that clients can infer congestion using latency and show that congestion-aware path selection can improve performance},
keywords = {algorithms, Tor, volunteer nodes},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2712\%20-\%20Congestion-aware\%20Path\%20Selection\%20for\%20Tor.pdf},
www_section = {https://bibliography.gnunet.org},
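The congestion signal used here is essentially the gap between a relay's latest round-trip time and its observed floor, the floor approximating pure propagation delay. A small sketch with made-up RTT samples and threshold:

def congestion(samples):
    # Latency above the observed floor is attributed to queueing/congestion.
    baseline = min(samples)          # propagation component of the RTT
    return samples[-1] - baseline    # most recent measurement vs. floor

rtts = {                             # opportunistic RTT samples per relay (ms)
    "relayA": [80, 82, 310],         # latest sample far above floor: congested
    "relayB": [95, 96, 99],
}

THRESHOLD_MS = 50
candidates = [r for r, s in rtts.items() if congestion(s) < THRESHOLD_MS]
print(candidates)                    # ['relayB']: congested relays are rejected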
@@ -788,9 +787,7 @@ serve that some nodes persist in being under-utilized or congested. This can deg
volume = {abs/1202.4503},
year = {2012},
month = feb,
- abstract = {While the Internet was conceived as a decentralized network, the most widely used web applications today tend toward centralization. Control increasingly rests with centralized service providers who, as a consequence, have also amassed unprecedented amounts of data about the behaviors and personalities of individuals.
-Developers, regulators, and consumer advocates have looked to alternative decentralized architectures as the natural response to threats posed by these centralized services. The result has been a great variety of solutions that include personal data stores (PDS), infomediaries, Vendor Relationship Management (VRM) systems, and federated and distributed social networks. And yet, for all these efforts, decentralized personal data architectures have seen little adoption.
-This position paper attempts to account for these failures, challenging the accepted wisdom in the web community on the feasibility and desirability of these approaches. We start with a historical discussion of the development of various categories of decentralized personal data architectures. Then we survey the main ideas to illustrate the common themes among these efforts. We tease apart the design characteristics of these systems from the social values that they (are intended to) promote. We use this understanding to point out numerous drawbacks of the decentralization paradigm, some inherent and others incidental. We end with recommendations for designers of these systems for working towards goals that are achievable, but perhaps more limited in scope and ambition},
+ abstract = {While the Internet was conceived as a decentralized network, the most widely used web applications today tend toward centralization. Control increasingly rests with centralized service providers who, as a consequence, have also amassed unprecedented amounts of data about the behaviors and personalities of individuals. Developers, regulators, and consumer advocates have looked to alternative decentralized architectures as the natural response to threats posed by these centralized services. The result has been a great variety of solutions that include personal data stores (PDS), infomediaries, Vendor Relationship Management (VRM) systems, and federated and distributed social networks. And yet, for all these efforts, decentralized personal data architectures have seen little adoption. This position paper attempts to account for these failures, challenging the accepted wisdom in the web community on the feasibility and desirability of these approaches. We start with a historical discussion of the development of various categories of decentralized personal data architectures. Then we survey the main ideas to illustrate the common themes among these efforts. We tease apart the design characteristics of these systems from the social values that they (are intended to) promote. We use this understanding to point out numerous drawbacks of the decentralization paradigm, some inherent and others incidental. We end with recommendations for designers of these systems for working towards goals that are achievable, but perhaps more limited in scope and ambition},
keywords = {distributed social networks, economics, personal data stores, policy, privacy, web},
www_section = {https://bibliography.gnunet.org},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CoRR\%20-\%20Critical\%20look\%20at\%20decentralization.pdf},
@@ -805,13 +802,7 @@ This position paper attempts to account for these failures, challenging the acce
school = {Technische Universitaet Muenchen},
type = {Masters},
address = {Garching bei Muenchen},
- abstract = {This thesis presents a novel approach for decentralized evaluation of regular expressions for capability discovery in DHT-based overlays. The system provides support for announcing capabilities expressed as regular expressions and discovering participants offering adequate capabilities.
-
-The idea behind our approach is to convert regular expressions into finite automatons and store the corresponding states and transitions in a DHT. We show how locally constructed DFA are merged in the DHT into an NFA without the knowledge of any NFA already present in the DHT and without the need for any central authority. Furthermore we present options for optimizing the DFA.
-
-There exist several possible applications for this general approach of decentralized regular expression evaluation. However, in this thesis we focus on the application of discovering users that are willing to provide network access using a specified protocol to a particular destination.
-
-We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster},
+ abstract = {This thesis presents a novel approach for decentralized evaluation of regular expressions for capability discovery in DHT-based overlays. The system provides support for announcing capabilities expressed as regular expressions and discovering participants offering adequate capabilities. The idea behind our approach is to convert regular expressions into finite automatons and store the corresponding states and transitions in a DHT. We show how locally constructed DFA are merged in the DHT into an NFA without the knowledge of any NFA already present in the DHT and without the need for any central authority. Furthermore we present options for optimizing the DFA. There exist several possible applications for this general approach of decentralized regular expression evaluation. However, in this thesis we focus on the application of discovering users that are willing to provide network access using a specified protocol to a particular destination. We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster},
keywords = {DFA, distributed hash table, GNUnet, NFA, regular expressions, search},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/szengel2012ms.pdf},
www_section = {https://bibliography.gnunet.org},
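The core idea of this thesis, automaton states and transitions stored under DHT keys, can be sketched with a dict standing in for the DHT and a hand-built DFA for the regex "ab*c"; both are illustrative assumptions:

import hashlib

def key(state):
    return hashlib.sha1(state.encode()).hexdigest()

dht = {}  # DHT stand-in: key(state) -> {"edges": {symbol: next state}, "accept": bool}

def announce(state, transitions, accepting=False):
    dht[key(state)] = {"edges": transitions, "accept": accepting}

# DFA for the capability regex "ab*c", built by hand for illustration.
announce("q0", {"a": "q1"})
announce("q1", {"b": "q1", "c": "q2"})
announce("q2", {}, accepting=True)

def search(string, start="q0"):
    state = start
    for ch in string:
        entry = dht.get(key(state))
        if entry is None or ch not in entry["edges"]:
            return False
        state = entry["edges"][ch]   # each hop is a DHT GET in the real system
    return dht[key(state)]["accept"]

print(search("abbc"), search("ac"), search("ab"))  # True True False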
@@ -826,11 +817,7 @@ We have implemented the system for our proposed approach and conducted a simulat
school = {TU Munich},
type = {Master{\textquoteright}s},
address = {Garching bei Muenchen},
- abstract = {This thesis presents the design and implementation of the GNU Alternative Domain System (GADS), a decentralized, secure name system providing memorable names for the Internet as an alternative to the Domain Name System (DNS). The system builds on ideas from Rivest{\textquoteright}s Simple Distributed Security Infrastructure (SDSI) to address a central issue with providing a decentralized mapping of secure identifiers to memorable names: providing a global, secure and memorable mapping is impossible without a trusted authority. SDSI offers an alternative by linking local name spaces; GADS uses the transitivity provided by the SDSI design to build a decentralized and censorship resistant name system without a trusted root based on secure delegation of authority.
-
-Additional details need to be considered in order to enable GADS to integrate smoothly with the World Wide Web. While following links on the Web matches following delegations in GADS, the existing HTTP-based infrastructure makes many assumptions about globally unique names; however, proxies can be used to enable legacy applications to function with GADS.
-
-This work presents the fundamental goals and ideas behind GADS, provides technical details on how GADS has been implemented and discusses deployment issues for using GADS with existing systems. We discuss how GADS and legacy DNS can interoperate during a transition period and what additional security advantages GADS offers over DNS with Security Extensions (DNSSEC). Finally, we present the results of a survey into surfing behavior, which suggests that the manual introduction of new direct links in GADS will be infrequent},
+ abstract = {This thesis presents the design and implementation of the GNU Alternative Domain System (GADS), a decentralized, secure name system providing memorable names for the Internet as an alternative to the Domain Name System (DNS). The system builds on ideas from Rivest{\textquoteright}s Simple Distributed Security Infrastructure (SDSI) to address a central issue with providing a decentralized mapping of secure identifiers to memorable names: providing a global, secure and memorable mapping is impossible without a trusted authority. SDSI offers an alternative by linking local name spaces; GADS uses the transitivity provided by the SDSI design to build a decentralized and censorship resistant name system without a trusted root based on secure delegation of authority. Additional details need to be considered in order to enable GADS to integrate smoothly with the World Wide Web. While following links on the Web matches following delegations in GADS, the existing HTTP-based infrastructure makes many assumptions about globally unique names; however, proxies can be used to enable legacy applications to function with GADS. This work presents the fundamental goals and ideas behind GADS, provides technical details on how GADS has been implemented and discusses deployment issues for using GADS with existing systems. We discuss how GADS and legacy DNS can interoperate during a transition period and what additional security advantages GADS offers over DNS with Security Extensions (DNSSEC). Finally, we present the results of a survey into surfing behavior, which suggests that the manual introduction of new direct links in GADS will be infrequent},
keywords = {censorship resistance, decentralized, DNS, GNU Name System, GNUnet},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/schanzen2012msc.pdf},
www_section = {https://bibliography.gnunet.org},
@@ -845,8 +832,8 @@ This work presents the fundamental goals and ideas behind GADS, provides technic
pages = {497--516},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {In this work we revisit the question of basing cryptography on imperfect randomness. Bosley and Dodis (TCC{\textquoteright}07) showed that if a source of randomness R is {\textquotedblleft}good enough{\textquotedblright} to generate a secret key capable of encrypting k bits, then one can deterministically extract nearly k almost uniform bits from R, suggesting that traditional privacy notions (namely, indistinguishability of encryption) require an {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other, even stronger impossibility results are known for achieving privacy under specific {\textquotedblleft}non-extractable{\textquotedblright} sources of randomness, such as the {\gamma}-Santha-Vazirani (SV) source, where each next bit has fresh entropy, but is allowed to have a small bias {\gamma} < 1 (possibly depending on prior bits).
-We ask whether similar negative results also hold for a more recent notion of privacy called differential privacy (Dwork et al., TCC{\textquoteright}06), concentrating, in particular, on achieving differential privacy with the Santha-Vazirani source. We show that the answer is no. Specifically, we give a differentially private mechanism for approximating arbitrary {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even with randomness coming from a {\gamma}-Santha-Vazirani source, for any {\gamma} < 1. This provides a somewhat surprising {\textquotedblleft}separation{\textquotedblright} between traditional privacy and differential privacy with respect to imperfect randomness.
+ abstract = {In this work we revisit the question of basing cryptography on imperfect randomness. Bosley and Dodis (TCC{\textquoteright}07) showed that if a source of randomness R is {\textquotedblleft}good enough{\textquotedblright} to generate a secret key capable of encrypting k bits, then one can deterministically extract nearly k almost uniform bits from R, suggesting that traditional privacy notions (namely, indistinguishability of encryption) require an {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other, even stronger impossibility results are known for achieving privacy under specific {\textquotedblleft}non-extractable{\textquotedblright} sources of randomness, such as the {\gamma}-Santha-Vazirani (SV) source, where each next bit has fresh entropy, but is allowed to have a small bias {\gamma} < 1 (possibly depending on prior bits).
+We ask whether similar negative results also hold for a more recent notion of privacy called differential privacy (Dwork et al., TCC{\textquoteright}06), concentrating, in particular, on achieving differential privacy with the Santha-Vazirani source. We show that the answer is no. Specifically, we give a differentially private mechanism for approximating arbitrary {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even with randomness coming from a {\gamma}-Santha-Vazirani source, for any {\gamma} < 1. This provides a somewhat surprising {\textquotedblleft}separation{\textquotedblright} between traditional privacy and differential privacy with respect to imperfect randomness.
Interestingly, the design of our mechanism is quite different from the traditional {\textquotedblleft}additive-noise{\textquotedblright} mechanisms (e.g., Laplace mechanism) successfully utilized to achieve differential privacy with perfect randomness. Indeed, we show that any (non-trivial) {\textquotedblleft}SV-robust{\textquotedblright} mechanism for our problem requires a demanding property called consistent sampling, which is strictly stronger than differential privacy, and cannot be satisfied by any additive-noise mechanism},
isbn = {978-3-642-32008-8},
doi = {10.1007/978-3-642-32009-5_29},
@@ -861,19 +848,19 @@ Interestingly, the design of our mechanism is quite different from the tradition
month = may,
institution = {Technische Universitaet Muenchen},
address = {Garching bei Muenchen},
- abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for
-performance tuning of P2P routing algorithms. This paper introduces
-and evaluates a new efficient method for participants in an
-unstructured P2P network to establish the size of the overall network.
-The presented method is highly efficient, propagating information
-about the current size of the network to all participants using
-O(|E|) operations where |E| is the number of edges in the network.
-Afterwards, all nodes have the same network size estimate, which can
-be made arbitrarily accurate by averaging results from multiple rounds
-of the protocol. Security measures are included which make it
-prohibitively expensive for a typical active participating adversary
-to significantly manipulate the estimates. This paper includes
-experimental results that demonstrate the viability, efficiency and
+ abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for
+performance tuning of P2P routing algorithms. This paper introduces
+and evaluates a new efficient method for participants in an
+unstructured P2P network to establish the size of the overall network.
+The presented method is highly efficient, propagating information
+about the current size of the network to all participants using
+O(|E|) operations where |E| is the number of edges in the network.
+Afterwards, all nodes have the same network size estimate, which can
+be made arbitrarily accurate by averaging results from multiple rounds
+of the protocol. Security measures are included which make it
+prohibitively expensive for a typical active participating adversary
+to significantly manipulate the estimates. This paper includes
+experimental results that demonstrate the viability, efficiency and
accuracy of the protocol},
keywords = {GNUnet, network security, network size estimation, peer-to-peer networking},
journal = {unknown},
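The flavor of such proximity-based size estimation: among n random identifiers, the longest prefix match against a per-round key is close to log2(n) bits, so averaging several rounds yields a usable estimate. A rough sketch that omits the protocol's bias correction and all of its security measures:

import random

random.seed(42)
BITS = 64

def proximity(a, b):
    # Number of leading bits shared by two 64-bit identifiers.
    x = a ^ b
    return BITS if x == 0 else BITS - x.bit_length()

def one_round(peer_ids):
    target = random.getrandbits(BITS)          # the round's flooding key
    return max(proximity(p, target) for p in peer_ids)

peers = [random.getrandbits(BITS) for _ in range(1024)]
rounds = [one_round(peers) for _ in range(64)]
avg = sum(rounds) / len(rounds)
print(2 ** avg)   # near 1024 up to a known constant bias; averaging rounds
                  # makes the estimate arbitrarily accurate, as the abstract notes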
@@ -926,7 +913,7 @@ accuracy of the protocol},
year = {2012},
month = apr,
address = {San Jose, CA},
- abstract = {With mobile phones becoming first-class citizens in the online world, the rich location data they bring to the table is set to revolutionize all aspects of online life including content delivery, recommendation systems, and advertising. However, user-tracking is a concern with such location-based services, not only because location data can be linked uniquely to individuals, but because the low-level nature of current location APIs and the resulting dependence on the cloud to synthesize useful representations virtually guarantees such tracking.
+ abstract = {With mobile phones becoming first-class citizens in the online world, the rich location data they bring to the table is set to revolutionize all aspects of online life including content delivery, recommendation systems, and advertising. However, user-tracking is a concern with such location-based services, not only because location data can be linked uniquely to individuals, but because the low-level nature of current location APIs and the resulting dependence on the cloud to synthesize useful representations virtually guarantees such tracking.
In this paper, we propose privacy-preserving location-based matching as a fundamental platform primitive and as an alternative to exposing low-level, latitude-longitude (lat-long) coordinates to applications. Applications set rich location-based triggers and have these be fired based on location updates either from the local device or from a remote device (e.g., a friend{\textquoteright}s phone). Our Koi platform, comprising a privacy-preserving matching service in the cloud and a phone-based agent, realizes this primitive across multiple phone and browser platforms. By masking low-level lat-long information from applications, Koi not only avoids leaking privacy-sensitive information, it also eases the task of programmers by providing a higher-level abstraction that is easier for applications to build upon. Koi{\textquoteright}s privacy-preserving protocol prevents the cloud service from tracking users. We verify the non-tracking properties of Koi using a theorem prover, illustrate how privacy guarantees can easily be added to a wide range of location-based applications, and show that our public deployment is performant, being able to perform 12K matches per second on a single core},
keywords = {location privacy, matching},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/nsdi12-koi.pdf},
@@ -941,8 +928,8 @@ In this paper, we propose privacy-preserving location-based matching as a fundam
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
address = {San Francisco, CA, USA},
- abstract = {Popular anonymous communication systems often require sending packets through a sequence of relays on dilated paths for strong anonymity protection. As a result, increased end-to-end latency renders such systems inadequate for the majority of Internet users who seek an intermediate level of anonymity protection while using latency-sensitive applications, such as Web applications. This paper serves to bridge the gap between communication systems that provide strong anonymity protection but with intolerable latency and non-anonymous communication systems by considering a new design space for the setting. More specifically, we explore how to achieve near-optimal latency while achieving an intermediate level of anonymity with a weaker yet practical adversary model (i.e., protecting an end-host{\textquoteright}s identity and location from servers)
-such that users can choose between the level of anonymity and usability. We propose Lightweight Anonymity and Privacy (LAP), an efficient network-based solution featuring lightweight path establishment and stateless communication, by concealing an end-host{\textquoteright}s topological location to enhance anonymity against
+ abstract = {Popular anonymous communication systems often require sending packets through a sequence of relays on dilated paths for strong anonymity protection. As a result, increased end-to-end latency renders such systems inadequate for the majority of Internet users who seek an intermediate level of anonymity protection while using latency-sensitive applications, such as Web applications. This paper serves to bridge the gap between communication systems that provide strong anonymity protection but with intolerable latency and non-anonymous communication systems by considering a new design space for the setting. More specifically, we explore how to achieve near-optimal latency while achieving an intermediate level of anonymity with a weaker yet practical adversary model (i.e., protecting an end-host{\textquoteright}s identity and location from servers)
+such that users can choose between the level of anonymity and usability. We propose Lightweight Anonymity and Privacy (LAP), an efficient network-based solution featuring lightweight path establishment and stateless communication, by concealing an end-host{\textquoteright}s topological location to enhance anonymity against
remote tracking. To show practicality, we demonstrate that LAP can work on top of the current Internet and proposed future Internet architectures},
keywords = {anonymous communication anonymity protection, LAP},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/LAP\%3A\%20Lightweight\%20Anonymity\%20and\%20Privacy.pdf},
@@ -957,37 +944,37 @@ remote tracking. To show practicality, we demonstrate that LAP can work on top o
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
address = {San Francisco, CA, USA},
- abstract = {The widely used Tor anonymity network is designed
-to enable low-latency anonymous communication. However, in
-practice, interactive communication on Tor{\textemdash}which accounts for
-over 90\% of connections in the Tor network [1]{\textemdash}incurs latencies
-over 5x greater than on the direct Internet path. In addition, since
-path selection to establish a circuit in Tor is oblivious to Internet
-routing, anonymity guarantees can break down in cases where an
-autonomous system (AS) can correlate traffic across the entry
-and exit segments of a circuit.
-In this paper, we show that both of these shortcomings in Tor
-can be addressed with only client-side modifications, i.e., without
-requiring a revamp of the entire Tor architecture. To this end,
-we design and implement a new Tor client, LASTor. First, we
-show that LASTor can deliver significant latency gains over the
-default Tor client by simply accounting for the inferred locations
-of Tor relays while choosing paths. Second, since the preference
-for low latency paths reduces the entropy of path selection,
-we design LASTor{\textquoteright}s path selection algorithm to be tunable. A
-user can choose an appropriate tradeoff between latency and
-anonymity by specifying a value between 0 (lowest latency) and
-1 (highest anonymity) for a single parameter. Lastly, we develop
-an efficient and accurate algorithm to identify paths on which
-an AS can correlate traffic between the entry and exit segments.
-This algorithm enables LASTor to avoid such paths and improve a
-user{\textquoteright}s anonymity, while the low runtime of the algorithm ensures
-that the impact on end-to-end latency of communication is low.
-By applying our techniques to measurements of real Internet
-paths and by using LASTor to visit the top 200 websites from
-several geographically-distributed end-hosts, we show that, in
-comparison to the default Tor client, LASTor reduces median
-latencies by 25\% while also reducing the false negative rate of
+ abstract = {The widely used Tor anonymity network is designed
+to enable low-latency anonymous communication. However, in
+practice, interactive communication on Tor{\textemdash}which accounts for
+over 90\% of connections in the Tor network [1]{\textemdash}incurs latencies
+over 5x greater than on the direct Internet path. In addition, since
+path selection to establish a circuit in Tor is oblivious to Internet
+routing, anonymity guarantees can break down in cases where an
+autonomous system (AS) can correlate traffic across the entry
+and exit segments of a circuit.
+In this paper, we show that both of these shortcomings in Tor
+can be addressed with only client-side modifications, i.e., without
+requiring a revamp of the entire Tor architecture. To this end,
+we design and implement a new Tor client, LASTor. First, we
+show that LASTor can deliver significant latency gains over the
+default Tor client by simply accounting for the inferred locations
+of Tor relays while choosing paths. Second, since the preference
+for low latency paths reduces the entropy of path selection,
+we design LASTor{\textquoteright}s path selection algorithm to be tunable. A
+user can choose an appropriate tradeoff between latency and
+anonymity by specifying a value between 0 (lowest latency) and
+1 (highest anonymity) for a single parameter. Lastly, we develop
+an efficient and accurate algorithm to identify paths on which
+an AS can correlate traffic between the entry and exit segments.
+This algorithm enables LASTor to avoid such paths and improve a
+user{\textquoteright}s anonymity, while the low runtime of the algorithm ensures
+that the impact on end-to-end latency of communication is low.
+By applying our techniques to measurements of real Internet
+paths and by using LASTor to visit the top 200 websites from
+several geographically-distributed end-hosts, we show that, in
+comparison to the default Tor client, LASTor reduces median
+latencies by 25\% while also reducing the false negative rate of
not detecting a potential snooping AS from 57\% to 11\%},
keywords = {anonymous communication, as, autonomous system, Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/LASTor\%3A\%20A\%20Low-Latency\%20AS-Aware\%20Tor\%20Client.pdf},
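The single tunable parameter described above can be realized by interpolating between latency-derived and uniform path weights; the weighting below is an assumed stand-in, not LASTor's exact algorithm:

import random

def pick_path(paths, alpha):
    # paths: [(path_id, latency_ms)]; alpha=0 favors lowest latency,
    # alpha=1 selects uniformly (highest path-selection entropy).
    best = min(lat for _, lat in paths)
    weights = [(1 - alpha) * (best / lat) + alpha * (1.0 / len(paths))
               for _, lat in paths]
    return random.choices(paths, weights=weights)[0]

paths = [("p1", 120), ("p2", 480), ("p3", 900)]
print(pick_path(paths, alpha=0.0))   # strongly prefers the fastest path p1
print(pick_path(paths, alpha=1.0))   # uniform over all three paths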
@@ -1003,10 +990,10 @@ not detecting a potential snooping AS from 57\% to 11\%},
pages = {321--338},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {This paper is about private data analysis, in which a trusted curator holding a confidential database responds to real vector-valued queries. A common approach to ensuring privacy for the database elements is to add appropriately generated random noise to the answers, releasing only these noisy responses. A line of study initiated in [7] examines the amount of distortion needed to prevent privacy violations of various kinds. The results in the literature vary according to several parameters, including the size of the database, the size of the universe from which data elements are drawn, the {\textquotedblleft}amount{\textquotedblright} of privacy desired, and for the purposes of the current work, the arity of the query. In this paper we sharpen and unify these bounds. Our foremost result combines the techniques of Hardt and Talwar [11] and McGregor et al. [13] to obtain linear lower bounds on distortion when providing differential privacy for a (contrived) class of low-sensitivity queries. (A query has low sensitivity if the data of a single individual has small effect on the answer.) Several structural results follow as immediate corollaries:
-We separate so-called counting queries from arbitrary low-sensitivity queries, proving the latter requires more noise, or distortion, than does the former;
-We separate ({\epsilon},0)-differential privacy from its well-studied relaxation ({\epsilon},{\delta})-differential privacy, even when {\delta} {\epsilon} 2- o(n) is negligible in the size n of the database, proving the latter requires less distortion than the former;
-We demonstrate that ({\epsilon},{\delta})-differential privacy is much weaker than ({\epsilon},0)-differential privacy in terms of mutual information of the transcript of the mechanism with the database, even when {\delta} {\epsilon} 2- o(n) is negligible in the size n of the database.
+ abstract = {This paper is about private data analysis, in which a trusted curator holding a confidential database responds to real vector-valued queries. A common approach to ensuring privacy for the database elements is to add appropriately generated random noise to the answers, releasing only these noisy responses. A line of study initiated in [7] examines the amount of distortion needed to prevent privacy violations of various kinds. The results in the literature vary according to several parameters, including the size of the database, the size of the universe from which data elements are drawn, the {\textquotedblleft}amount{\textquotedblright} of privacy desired, and for the purposes of the current work, the arity of the query. In this paper we sharpen and unify these bounds. Our foremost result combines the techniques of Hardt and Talwar [11] and McGregor et al. [13] to obtain linear lower bounds on distortion when providing differential privacy for a (contrived) class of low-sensitivity queries. (A query has low sensitivity if the data of a single individual has small effect on the answer.) Several structural results follow as immediate corollaries:
+We separate so-called counting queries from arbitrary low-sensitivity queries, proving the latter requires more noise, or distortion, than does the former;
+We separate ({\epsilon},0)-differential privacy from its well-studied relaxation ({\epsilon},{\delta})-differential privacy, even when $\delta \in 2^{-o(n)}$ is negligible in the size n of the database, proving the latter requires less distortion than the former;
+We demonstrate that ({\epsilon},{\delta})-differential privacy is much weaker than ({\epsilon},0)-differential privacy in terms of mutual information of the transcript of the mechanism with the database, even when $\delta \in 2^{-o(n)}$ is negligible in the size n of the database.
We also simplify the lower bounds on noise for counting queries in [11] and make them unconditional. Further, we use a characterization of ({\epsilon},{\delta})-differential privacy from [13] to obtain lower bounds on the distortion needed to ensure ({\epsilon},{\delta})-differential privacy for {\epsilon},{\delta} > 0. We next revisit the LP decoding argument of [10] and combine it with a recent result of Rudelson [15] to improve on a result of Kasiviswanathan et al. [12] on noise lower bounds for privately releasing $\ell$-way marginals},
keywords = {Differential Privacy, LP decoding},
isbn = {978-3-642-28913-2},
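
The distortion this paper lower-bounds is the noise added by mechanisms such as the standard Laplace mechanism; a minimal sketch for a sensitivity-1 counting query (the epsilon value below is arbitrary):

import random

def laplace_mechanism(true_answer, sensitivity, epsilon):
    # (epsilon, 0)-differentially private release: add Laplace noise with
    # scale sensitivity/epsilon, sampled as a difference of two exponentials.
    scale = sensitivity / epsilon
    noise = random.expovariate(1 / scale) - random.expovariate(1 / scale)
    return true_answer + noise

# A counting query ("how many records satisfy P?") has sensitivity 1.
print(laplace_mechanism(true_answer=42, sensitivity=1.0, epsilon=0.5))
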
@@ -1036,7 +1023,7 @@ We also simplify the lower bounds on noise for counting queries in [11] and also
school = {Technische Universitaet Muenchen},
type = {Masters},
address = {Garching bei Muenchen},
- abstract = {Debugging is tedious and time consuming work that, for certain types of bugs, can and should be automated. Debugging distributed systems is more complex due to time dependencies between interacting processes. Another related problem is duplicate bug reports in bug repositories. Finding bug duplicates is hard and wastes developers{\textquoteright} time which may affect the development team{\textquoteright}s rate of bug fixes and new releases.
+ abstract = {Debugging is tedious and time-consuming work that, for certain types of bugs, can and should be automated. Debugging distributed systems is more complex due to time dependencies between interacting processes. Another related problem is duplicate bug reports in bug repositories. Finding bug duplicates is hard and wastes developers{\textquoteright} time, which may affect the development team{\textquoteright}s rate of bug fixes and new releases.
In this master{\textquoteright}s thesis we introduce Monkey, a new tool that provides a solution for automated classification, investigation and characterization of bugs, as well as a solution for comparing bug reports and avoiding duplicates. Our tool is particularly suitable for distributed systems due to its autonomy. We present Monkey{\textquoteright}s key design goals and architecture and give experimental results demonstrating the viability of our approach}
keywords = {automation, debugging, distributed systems},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/safey-thesis-monkey.pdf , https://gnunet.org/git/bibliography.git/tree/docs/safey-presentation-monkey.pdf},
@@ -1047,10 +1034,10 @@ In this master thesis we introduce Monkey, a new tool that provides a solution f
 title = {NTALG -- TCP NAT traversal with application-level gateways},
booktitle = {Consumer Communications and Networking Conference (CCNC), 2012 IEEE},
year = {2012},
- abstract = {Consumer computers or home communication devices are usually connected to the Internet via a Network Address Translation (NAT) router. This imposes restrictions for networking applications that require inbound connections.
-Existing solutions for NAT traversal can remedy the restrictions, but still there is a fraction of home users which lack support of it, especially when it comes to TCP. We present a framework
-for traversing NAT routers by exploiting their built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based applications. While this does not work in every scenario, it
-significantly improves the success chance without requiring any user interaction at all. To demonstrate the framework, we show
+ abstract = {Consumer computers or home communication devices are usually connected to the Internet via a Network Address Translation (NAT) router. This imposes restrictions on networking applications that require inbound connections.
+Existing solutions for NAT traversal can remedy the restrictions, but there is still a fraction of home users who lack support for it, especially when it comes to TCP. We present a framework
+for traversing NAT routers by exploiting their built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based applications. While this does not work in every scenario, it
+significantly improves the success chance without requiring any user interaction at all. To demonstrate the framework, we show
a small test setup with laptop computers and home NAT routers},
keywords = {FTP-ALG, NAT},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WHW_12-NTALG.pdf},
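
The ALG trick sketched in this abstract hinges on the NAT rewriting and forwarding ports advertised in control-channel commands it inspects; a minimal sketch of encoding an FTP PORT command (the address and port are made up, and a real traversal additionally needs the ALG-monitored control connection):

def port_command(private_ip, port):
    # FTP PORT syntax: h1,h2,h3,h4,p1,p2 where port = p1*256 + p2.
    parts = private_ip.split(".") + [str(port // 256), str(port % 256)]
    return "PORT " + ",".join(parts) + "\r\n"

print(port_command("192.168.1.10", 5000))  # PORT 192,168,1,10,19,136
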
@@ -1075,9 +1062,9 @@ a small test setup with laptop computers and home NAT routers},
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
address = {San Francisco, CA, USA},
- abstract = {We consider the setting of HTTP traffic over encrypted tunnels, as used to conceal the identity of websites visited by a user. It is well known that traffic analysis (TA) attacks can accurately identify the website a user visits despite the use of encryption, and previous work has looked at specific attack/countermeasure pairings. We provide the first comprehensive analysis of general-purpose TA countermeasures. We show that nine known countermeasures are vulnerable to simple attacks that exploit coarse features of traffic (e.g., total time and bandwidth). The considered countermeasures
-include ones like those standardized by TLS, SSH, and IPsec, and even more complex ones like the traffic morphing scheme of Wright et al. As just one of our results, we show that despite the use of traffic morphing, one can use only
-total upstream and downstream bandwidth to identify {\textemdash}with 98\% accuracy{\textemdash} which of two websites was visited. One implication of what we find is that, in the context of website identification, it is unlikely that bandwidth-efficient, general-
+ abstract = {We consider the setting of HTTP traffic over encrypted tunnels, as used to conceal the identity of websites visited by a user. It is well known that traffic analysis (TA) attacks can accurately identify the website a user visits despite the use of encryption, and previous work has looked at specific attack/countermeasure pairings. We provide the first comprehensive analysis of general-purpose TA countermeasures. We show that nine known countermeasures are vulnerable to simple attacks that exploit coarse features of traffic (e.g., total time and bandwidth). The considered countermeasures
+include ones like those standardized by TLS, SSH, and IPsec, and even more complex ones like the traffic morphing scheme of Wright et al. As just one of our results, we show that despite the use of traffic morphing, one can use only
+total upstream and downstream bandwidth to identify{\textemdash}with 98\% accuracy{\textemdash}which of two websites was visited. One implication of what we find is that, in the context of website identification, it is unlikely that bandwidth-efficient,
general-purpose TA countermeasures can ever provide the type of security targeted in prior work},
keywords = {encrypted traffic, machine learning, padding, privacy, traffic analysis countermeasures},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Peek-a-Boo\%2C\%20I\%20Still\%20See\%20You\%3A\%20Why\%20Efficient\%20Traffic\%20Analysis\%20Countermeasures\%20Fail.pdf},
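
The coarse-feature attack reported above can be pictured as a nearest-neighbor classifier over just two scalars per trace; the profiles and test values below are synthetic, not data from the paper:

# Total (upstream, downstream) bandwidth profiles for two sites (made up).
PROFILES = {"siteA": (12_000.0, 850_000.0), "siteB": (30_000.0, 2_400_000.0)}

def classify(up, down):
    # 1-NN on two coarse features that padding-based countermeasures
    # barely disturb.
    def dist(site):
        pu, pd = PROFILES[site]
        return (up - pu) ** 2 + (down - pd) ** 2
    return min(PROFILES, key=dist)

print(classify(up=13_500.0, down=900_000.0))  # -> siteA
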
@@ -1131,17 +1118,17 @@ purpose TA countermeasures can ever provide the type of security targeted in pri
year = {2012},
month = jul,
chapter = {1313},
- abstract = {In this paper, we present Saturn, an overlay architecture for large-scale data networks maintained over Distributed Hash
-Tables (DHTs) that efficiently processes range queries and ensures access load balancing and fault-tolerance. Placing consecutive
-data values in neighboring peers is desirable in DHTs since it accelerates range query processing; however, such a placement is highly
-susceptible to load imbalances. At the same time, DHTs may be susceptible to node departures/failures and high data availability and
-fault tolerance are significant issues. Saturn deals effectively with these problems through the introduction of a novel multiple ring,
-order-preserving architecture. The use of a novel order-preserving hash function ensures fast range query processing. Replication
-across and within data rings (termed vertical and horizontal replication) forms the foundation over which our mechanisms are
-developed, ensuring query load balancing and fault tolerance, respectively. Our detailed experimentation study shows strong gains in
-range query processing efficiency, access load balancing, and fault tolerance, with low replication overheads. The significance of
-Saturn is not only that it effectively tackles all three issues together{\textemdash}i.e., supporting range queries, ensuring load balancing, and
-providing fault tolerance over DHTs{\textemdash}but also that it can be applied on top of any order-preserving DHT enabling it to dynamically
+ abstract = {In this paper, we present Saturn, an overlay architecture for large-scale data networks maintained over Distributed Hash
+Tables (DHTs) that efficiently processes range queries and ensures access load balancing and fault-tolerance. Placing consecutive
+data values in neighboring peers is desirable in DHTs since it accelerates range query processing; however, such a placement is highly
+susceptible to load imbalances. At the same time, DHTs may be susceptible to node departures/failures, so high data availability and
+fault tolerance are significant issues. Saturn deals effectively with these problems through the introduction of a novel multiple ring,
+order-preserving architecture. The use of a novel order-preserving hash function ensures fast range query processing. Replication
+across and within data rings (termed vertical and horizontal replication) forms the foundation over which our mechanisms are
+developed, ensuring query load balancing and fault tolerance, respectively. Our detailed experimental study shows strong gains in
+range query processing efficiency, access load balancing, and fault tolerance, with low replication overheads. The significance of
+Saturn is not only that it effectively tackles all three issues together{\textemdash}i.e., supporting range queries, ensuring load balancing, and
+providing fault tolerance over DHTs{\textemdash}but also that it can be applied on top of any order-preserving DHT, enabling it to dynamically
handle replication and, thus, to trade off replication costs for fair load distribution and fault tolerance},
keywords = {distributed hash table, load balancing, range queries, Saturn},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/saturn-range-dht.pdf},
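
The role of the order-preserving hash mentioned above can be shown with a minimal sketch (the linear mapping and key domain are assumptions; Saturn's actual function and its replication rings are more elaborate). Because the placement is monotone, a range query maps to one consecutive arc of the ring:

RING_SIZE = 2 ** 16
KEY_MIN, KEY_MAX = 0, 10 ** 6  # assumed key domain

def op_hash(key):
    # Monotone in key: k1 <= k2 implies op_hash(k1) <= op_hash(k2).
    return (key - KEY_MIN) * (RING_SIZE - 1) // (KEY_MAX - KEY_MIN)

def range_query_arc(lo, hi):
    # Consecutive keys land on neighboring ring positions.
    return op_hash(lo), op_hash(hi)

print(range_query_arc(250_000, 260_000))
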
@@ -1213,7 +1200,7 @@ handle replication and, thus, to trade off replication costs for fair load distr
year = {2011},
month = jul,
address = {Waterloo, Canada},
- abstract = {We give a critical analysis of the system-wide anonymity metric of Edman et al. [3], which is based on the permanent value of a doubly-stochastic matrix. By providing an intuitive understanding of the permanent of such a matrix, we show that a metric that looks no further than this composite value is at best a rough indicator of anonymity. We identify situations where its inaccuracy is acute, and reveal a better anonymity indicator. Also, by constructing an information-preserving embedding of a smaller class of attacks into the wider class for which this metric was proposed, we show that this metric fails to possess desirable
+ abstract = {We give a critical analysis of the system-wide anonymity metric of Edman et al. [3], which is based on the permanent value of a doubly-stochastic matrix. By providing an intuitive understanding of the permanent of such a matrix, we show that a metric that looks no further than this composite value is at best a rough indicator of anonymity. We identify situations where its inaccuracy is acute, and reveal a better anonymity indicator. Also, by constructing an information-preserving embedding of a smaller class of attacks into the wider class for which this metric was proposed, we show that this metric fails to possess desirable
generalization properties. Finally, we present a new anonymity metric that does not exhibit these shortcomings. Our new metric is accurate as well as general},
keywords = {combinatorial matrix theory, probabilistic attacks, system-wide anonymity metric},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/PETS\%2711\%20-\%20An\%20Accurate\%20System-Wide\%20Anonymity\%20Metric\%20for\%20Probabilistic\%20Attacks.pdf},
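
The composite value this critique targets is the permanent of a doubly stochastic matrix; for small n it can be computed by brute force (illustration only; larger matrices need Ryser's algorithm or better):

from itertools import permutations

def permanent(m):
    # Sum over all perfect matchings of senders to receivers.
    n = len(m)
    total = 0.0
    for p in permutations(range(n)):
        prod = 1.0
        for i in range(n):
            prod *= m[i][p[i]]
        total += prod
    return total

# Uniform 3x3 doubly stochastic matrix (perfect mixing among 3 users):
uniform = [[1 / 3] * 3 for _ in range(3)]
print(permanent(uniform))  # 3!/3**3 = 6/27 ~ 0.2222
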
@@ -1239,7 +1226,7 @@ generalization properties. Finally, we present a new anonymity metric that does
year = {2011},
month = feb,
address = {St. Lucia},
- abstract = {Anonymous blacklisting schemes allow online service providers to prevent future anonymous access by abusive users while preserving the privacy of all anonymous users (both abusive and non-abusive). The first scheme proposed for this purpose was Nymble, an extremely efficient scheme based only on symmetric primitives; however, Nymble relies on trusted third parties who can collude to de-anonymize users of the scheme. Two recently proposed schemes, Nymbler and Jack, reduce the trust placed in these third parties at the expense of using less-efficient asymmetric crypto primitives. We present BNymble, a scheme which matches the anonymity guarantees of Nymbler and Jack while (nearly) maintaining the efficiency of the original Nymble. The key insight of
+ abstract = {Anonymous blacklisting schemes allow online service providers to prevent future anonymous access by abusive users while preserving the privacy of all anonymous users (both abusive and non-abusive). The first scheme proposed for this purpose was Nymble, an extremely efficient scheme based only on symmetric primitives; however, Nymble relies on trusted third parties who can collude to de-anonymize users of the scheme. Two recently proposed schemes, Nymbler and Jack, reduce the trust placed in these third parties at the expense of using less-efficient asymmetric crypto primitives. We present BNymble, a scheme which matches the anonymity guarantees of Nymbler and Jack while (nearly) maintaining the efficiency of the original Nymble. The key insight of
BNymble is that we can achieve the anonymity goals of these more recent schemes by replacing only the infrequent {\textquotedblleft}User Registration{\textquotedblright} protocol from Nymble with asymmetric primitives. We prove the security of BNymble, and report on its efficiency},
keywords = {anonymous access, anonymous blacklisting, BNymble},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FC\%2711\%20-\%20BNymble.pdf},
@@ -1267,8 +1254,8 @@ BNymble is that we can achieve the anonymity goals of these more recent schemes
publisher = {ACM},
organization = {ACM},
address = {Chicago, IL, United States},
- abstract = {Many users face surveillance of their Internet communications and a significant fraction suffer from outright blocking of certain destinations. Anonymous communication systems allow users to conceal the destinations they communicate with, but do not hide the fact that the users are using them. The mere use of such systems may invite suspicion, or access to them may be blocked.
-We therefore propose Cirripede, a system that can be used for unobservable communication with Internet destinations. Cirripede is designed to be deployed by ISPs; it intercepts connections from clients to innocent-looking destinations and redirects them to the true destination requested by the client. The communication is encoded in a way that is indistinguishable from normal communications to anyone without the master secret key, while public-key cryptography is used to eliminate the need for any secret information that must be shared with Cirripede users.
+ abstract = {Many users face surveillance of their Internet communications and a significant fraction suffer from outright blocking of certain destinations. Anonymous communication systems allow users to conceal the destinations they communicate with, but do not hide the fact that the users are using them. The mere use of such systems may invite suspicion, or access to them may be blocked.
+We therefore propose Cirripede, a system that can be used for unobservable communication with Internet destinations. Cirripede is designed to be deployed by ISPs; it intercepts connections from clients to innocent-looking destinations and redirects them to the true destination requested by the client. The communication is encoded in a way that is indistinguishable from normal communications to anyone without the master secret key, while public-key cryptography is used to eliminate the need for any secret information that must be shared with Cirripede users.
Cirripede is designed to work scalably with routers that handle large volumes of traffic while imposing minimal overhead on ISPs and not disrupting existing traffic. This allows Cirripede proxies to be strategically deployed at central locations, making access to Cirripede very difficult to block. We built a proof-of-concept implementation of Cirripede and performed a testbed evaluation of its performance properties},
keywords = {censorship-resistance, unobservability},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CCS\%2711\%20-\%20Cirripede.pdf},
@@ -1296,15 +1283,15 @@ Cirripede is designed to work scalably with routers that handle large volumes of
month = jan,
institution = {INRIA Rocquencourt},
address = {Le Chensay Cedex},
- abstract = {Eventual consistency aims to ensure that replicas of some mutable shared
-object converge without foreground synchronisation. Previous approaches to eventual con-
-sistency are ad-hoc and error-prone. We study a principled approach: to base the design of
-shared data types on some simple formal conditions that are sufficient to guarantee even-
-tual consistency. We call these types Convergent or Commutative Replicated Data Types
-(CRDTs). This paper formalises asynchronous object replication, either state based or op-
-eration based, and provides a sufficient condition appropriate for each case. It describes
-several useful CRDTs, including container data types supporting both add and remove op-
-erations with clean semantics, and more complex types such as graphs, montonic DAGs,
+ abstract = {Eventual consistency aims to ensure that replicas of some mutable shared
+object converge without foreground synchronisation. Previous approaches to eventual
+consistency are ad-hoc and error-prone. We study a principled approach: to base the design
+of shared data types on some simple formal conditions that are sufficient to guarantee
+eventual consistency. We call these types Convergent or Commutative Replicated Data Types
+(CRDTs). This paper formalises asynchronous object replication, either state based or
+operation based, and provides a sufficient condition appropriate for each case. It describes
+several useful CRDTs, including container data types supporting both add and remove
+operations with clean semantics, and more complex types such as graphs, monotonic DAGs,
and sequences. It discusses some properties needed to implement non-trivial CRDTs},
keywords = {commutative operations, data replication, optimistic replication},
journal = {unknown},
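
A minimal state-based CRDT illustrating the convergence condition the report formalises (states form a join semilattice, merge is the join, and updates only inflate the state) is the grow-only counter:

class GCounter:
    def __init__(self, replica_id, n_replicas):
        self.i = replica_id
        self.counts = [0] * n_replicas

    def increment(self):
        self.counts[self.i] += 1          # update is an inflation

    def value(self):
        return sum(self.counts)

    def merge(self, other):
        # Join = element-wise max: commutative, associative, idempotent,
        # so replicas converge regardless of merge order or repetition.
        self.counts = [max(a, b) for a, b in zip(self.counts, other.counts)]

a, b = GCounter(0, 2), GCounter(1, 2)
a.increment(); a.increment(); b.increment()
a.merge(b); b.merge(a)
assert a.value() == b.value() == 3
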
@@ -1334,7 +1321,7 @@ and sequences. It discusses some properties needed to implement non-trivial CRDT
year = {2011},
month = aug,
address = {San Francisco, CA, USA},
- abstract = {We present decoy routing, a mechanism capable of circumventing common network filtering strategies. Unlike other circumvention techniques, decoy routing does not require a client to connect to a specific IP address (which
+ abstract = {We present decoy routing, a mechanism capable of circumventing common network filtering strategies. Unlike other circumvention techniques, decoy routing does not require a client to connect to a specific IP address (which
is easily blocked) in order to provide circumvention. We show that if it is possible for a client to connect to any unblocked host/service, then decoy routing could be used to connect them to a blocked destination without cooperation from the host. This is accomplished by placing the circumvention service in the network itself -- where a single device could proxy traffic between a significant fraction of hosts -- instead of at the edge},
keywords = {decoy routing, Internet communication, network filter},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/FOCI\%2711\%20-\%20Decoy\%20Routing\%3A\%20Toward\%20Unblockable\%20Internet\%20Communication.pdf},
@@ -1358,10 +1345,10 @@ is easily blocked) in order to provide circumvention. We show that if it is poss
journal = {CoRR},
volume = {abs/1103.2626},
year = {2011},
- abstract = {We examine the combination of two directions in the field of privacy concerning computations over distributed private inputs--secure function evaluation (SFE) and differential privacy. While in both the goal is to privately evaluate some function of the individual inputs, the privacy requirements are significantly different. The general feasibility results for SFE suggest a natural paradigm for implementing differentially private analyses distributively: First choose what to compute, i.e., a differentially private analysis; Then decide how to compute it, i.e., construct an SFE protocol for this analysis.
-We initiate an examination whether there are advantages to a paradigm where both decisions are made simultaneously. In particular, we investigate under which accuracy requirements it is beneficial to adapt this paradigm for computing a collection of functions including binary sum, gap threshold, and approximate median queries. Our results imply that when computing the binary sum of n distributed inputs then:
-* When we require that the error is o(n{\surd}) and the number of rounds is constant, there is no benefit in the new paradigm.
-* When we allow an error of O(n{\surd}), the new paradigm yields more efficient protocols when we consider protocols that compute symmetric functions.
+ abstract = {We examine the combination of two directions in the field of privacy concerning computations over distributed private inputs--secure function evaluation (SFE) and differential privacy. While in both the goal is to privately evaluate some function of the individual inputs, the privacy requirements are significantly different. The general feasibility results for SFE suggest a natural paradigm for implementing differentially private analyses distributively: first choose what to compute, i.e., a differentially private analysis; then decide how to compute it, i.e., construct an SFE protocol for this analysis.
+We initiate an examination of whether there are advantages to a paradigm where both decisions are made simultaneously. In particular, we investigate under which accuracy requirements it is beneficial to adapt this paradigm for computing a collection of functions including binary sum, gap threshold, and approximate median queries. Our results imply that when computing the binary sum of n distributed inputs:
+* When we require that the error is $o(\sqrt{n})$ and the number of rounds is constant, there is no benefit in the new paradigm.
+* When we allow an error of $O(\sqrt{n})$, the new paradigm yields more efficient protocols when we consider protocols that compute symmetric functions.
Our results also yield new separations between the local and global models of computation for private data analysis},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/DistributedPrivateData2008Beimel.pdf},
www_section = {https://bibliography.gnunet.org},
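
A concrete local-model protocol for the binary sum discussed above is randomized response, whose error is on the order of $\sqrt{n}$ (the flipping probability below is arbitrary):

import random

def randomized_response(bit, p=0.25):
    # Report the true bit with probability 1-p, the flipped bit with p.
    return bit ^ (random.random() < p)

def estimate_sum(reports, p=0.25):
    # E[report] = bit*(1-2p) + p, so invert the affine map to debias.
    n = len(reports)
    return (sum(reports) - n * p) / (1 - 2 * p)

bits = [random.randint(0, 1) for _ in range(10_000)]
print(sum(bits), round(estimate_sum([randomized_response(b) for b in bits])))
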
@@ -1373,7 +1360,7 @@ Our results also yield new separations between the local and global models of co
year = {2011},
month = aug,
address = {San Francisco, CA, USA},
- abstract = {Tor is one of the most widely-used privacy enhancing technologies for achieving online anonymity and resisting censorship. Simultaneously, Tor is also an evolving research network on which investigators perform experiments to improve the network{\textquoteright}s resilience to attacks and enhance its performance. Existing methods for studying Tor have included analytical modeling, simulations, small-scale network emulations, small-scale PlanetLab deployments, and measurement and analysis of the live Tor network. Despite the growing body of work concerning Tor, there is no widely accepted methodology for conducting Tor research in a manner that preserves realism while protecting live users{\textquoteright} privacy. In an effort to propose a standard, rigorous experimental framework for
+ abstract = {Tor is one of the most widely-used privacy enhancing technologies for achieving online anonymity and resisting censorship. Simultaneously, Tor is also an evolving research network on which investigators perform experiments to improve the network{\textquoteright}s resilience to attacks and enhance its performance. Existing methods for studying Tor have included analytical modeling, simulations, small-scale network emulations, small-scale PlanetLab deployments, and measurement and analysis of the live Tor network. Despite the growing body of work concerning Tor, there is no widely accepted methodology for conducting Tor research in a manner that preserves realism while protecting live users{\textquoteright} privacy. In an effort to propose a standard, rigorous experimental framework for
conducting Tor research in a way that ensures safety and realism, we present the design of ExperimenTor, a large-scale Tor network emulation toolkit and testbed. We also report our early experiences with prototype testbeds currently deployed at four research institutions},
keywords = {experimentation, ExperimenTor, privacy enhancing technologies, Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CSET\%2711\%20-\%20ExperimenTor.pdf},
@@ -1386,7 +1373,7 @@ conducting Tor research in a way that ensures safety and realism, we present the
year = {2011},
month = dec,
address = {Orlando, FL, USA},
- abstract = {Tor is a volunteer-operated network of application-layer relays that enables users to communicate privately and anonymously. Unfortunately, Tor often exhibits poor performance due to congestion caused by the unbalanced ratio of clients to available relays, as well as a disproportionately high consumption of network capacity by a small fraction of filesharing users.
+ abstract = {Tor is a volunteer-operated network of application-layer relays that enables users to communicate privately and anonymously. Unfortunately, Tor often exhibits poor performance due to congestion caused by the unbalanced ratio of clients to available relays, as well as a disproportionately high consumption of network capacity by a small fraction of filesharing users.
This paper argues the very counterintuitive notion that slowing down traffic on Tor will increase the bandwidth capacity of the network and consequently improve the experience of interactive web users. We introduce Tortoise, a system for rate limiting Tor at its ingress points. We demonstrate that Tortoise incurs little penalty for interactive web users, while significantly decreasing the throughput for filesharers. Our techniques provide incentives to filesharers to configure their Tor clients to also relay traffic, which in turn improves the network{\textquoteright}s overall performance. We present large-scale emulation results that indicate that interactive users will achieve a significant speedup if even a small fraction of clients opt to run relays},
keywords = {anonymity, performance, Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ACSAC\%2711\%20-\%20Tortoise.pdf},
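
Rate limiting at an ingress point, as proposed above, is classically realized with a token bucket; a toy sketch (parameters invented, not Tortoise's implementation) showing why bulk flows are throttled while short interactive exchanges pass:

import time

class TokenBucket:
    def __init__(self, rate, burst):
        self.rate, self.capacity = rate, burst
        self.tokens, self.last = float(burst), time.monotonic()

    def allow(self, nbytes):
        # Refill proportionally to elapsed time, capped at the burst size.
        now = time.monotonic()
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if nbytes <= self.tokens:
            self.tokens -= nbytes
            return True
        return False

bucket = TokenBucket(rate=50_000, burst=100_000)  # 50 kB/s, 100 kB burst
print(bucket.allow(80_000), bucket.allow(80_000))  # True False (drained)
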
@@ -1399,7 +1386,7 @@ This paper argues the very counterintuitive notion that slowing down traffic on
year = {2011},
month = dec,
address = {Orlando, FL, USA},
- abstract = {Traffic watermarking is an important element in many network security and privacy applications, such as tracing botnet C\&C communications and deanonymizing peer-to-peer VoIP calls. The state-of-the-art traffic watermarking schemes are usually based on packet timing information and they are notoriously difficult to detect. In this paper, we show for the first time that even the most sophisticated timing-based watermarking schemes (e.g., RAINBOW and SWIRL) are not invisible by proposing a new detection system called BACKLIT. BACKLIT is designed according to the observation that any practical timing-based traffic watermark will cause noticeable alterations in the intrinsic timing features typical of TCP flows. We propose five metrics that are sufficient for detecting four state-of-the-art traffic watermarks for bulk transfer and interactive traffic. BACKLIT can be easily deployed in stepping stones and anonymity networks (e.g., Tor), because it does not rely on strong assumptions and can be realized in an active or passive mode. We have conducted extensive experiments to evaluate BACKLIT{\textquoteright}s detection performance using the PlanetLab platform. The results show that BACKLIT can detect watermarked network flows
+ abstract = {Traffic watermarking is an important element in many network security and privacy applications, such as tracing botnet C\&C communications and deanonymizing peer-to-peer VoIP calls. The state-of-the-art traffic watermarking schemes are usually based on packet timing information, and they are notoriously difficult to detect. In this paper, we propose a new detection system called BACKLIT and show for the first time that even the most sophisticated timing-based watermarking schemes (e.g., RAINBOW and SWIRL) are not invisible. BACKLIT is designed according to the observation that any practical timing-based traffic watermark will cause noticeable alterations in the intrinsic timing features typical of TCP flows. We propose five metrics that are sufficient for detecting four state-of-the-art traffic watermarks for bulk transfer and interactive traffic. BACKLIT can be easily deployed in stepping stones and anonymity networks (e.g., Tor), because it does not rely on strong assumptions and can be realized in an active or passive mode. We have conducted extensive experiments to evaluate BACKLIT{\textquoteright}s detection performance using the PlanetLab platform. The results show that BACKLIT can detect watermarked network flows
with high accuracy and few false positives},
keywords = {BACKLIT, detection system, invisible, network security, packet timing information, privacy, traffic watermark},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ACSAC\%2711\%20-\%20BACKLIT.pdf},
@@ -1414,7 +1401,7 @@ with high accuracy and few false positives},
publisher = {ACM},
organization = {ACM},
address = {Chicago, IL, United States},
+ abstract = {We introduce Faust, a solution to the {\textquotedblleft}anonymous blacklisting problem{\textquotedblright}: allow an anonymous user to prove that she is authorized to access an online service such that if the user misbehaves, she retains her anonymity but will be unable to
+ abstract = {We introduce Faust, a solution to the {\textquotedblleft}anonymous blacklisting problem:{\textquotedblright} allow an anonymous user to prove that she is authorized to access an online service such that if the user misbehaves, she retains her anonymity but will be unable to
authenticate in future sessions. Faust uses no trusted third parties and is one to two orders of magnitude more efficient than previous schemes without trusted third parties. The key idea behind Faust is to eliminate the explicit blacklist used in all previous approaches, and rely instead on an implicit whitelist, based on blinded authentication tokens},
keywords = {anonymous authentication, anonymous blacklisting, privacy-enhancing revocation},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WPES\%2711\%20-\%20FAUST.pdf},
@@ -1444,9 +1431,9 @@ authenticate in future sessions. Faust uses no trusted third parties and is one
year = {2011},
month = may,
address = {San Francisco, CA, USA},
- abstract = {Anonymous communications networks, such as Tor, help to solve the real and important problem of enabling users to communicate privately over the Internet. However, in doing so, anonymous communications networks introduce an entirely new problem for the service providers{\textemdash}such as websites, IRC networks or mail servers{\textemdash}with which these users interact; in particular, since all anonymous users look alike, there is no way for the service providers to hold individual misbehaving anonymous users accountable for their actions. Recent
-research efforts have focused on using anonymous blacklisting systems (which are sometimes called anonymous revocation systems) to empower service providers with the ability to revoke access from abusive anonymous users. In contrast to revocable anonymity systems, which enable some trusted third party to deanonymize users, anonymous blacklisting systems provide users with a way to authenticate anonymously with a service provider, while enabling the service provider to revoke access from any users that misbehave, without revealing their identities. In this paper, we introduce the anonymous blacklisting
-problem and survey the literature on anonymous blacklisting systems, comparing and contrasting the architecture of various existing schemes, and discussing the tradeoffs inherent with each design. The literature on anonymous blacklisting systems lacks a unified set of definitions; each scheme operates under different trust assumptions and provides different security and privacy guarantees. Therefore, before we discuss the existing approaches in detail, we first propose a formal definition for anonymous blacklisting systems, and a set of security and privacy properties that these systems should possess. We also
+ abstract = {Anonymous communications networks, such as Tor, help to solve the real and important problem of enabling users to communicate privately over the Internet. However, in doing so, anonymous communications networks introduce an entirely new problem for the service providers{\textemdash}such as websites, IRC networks or mail servers{\textemdash}with which these users interact; in particular, since all anonymous users look alike, there is no way for the service providers to hold individual misbehaving anonymous users accountable for their actions. Recent
+research efforts have focused on using anonymous blacklisting systems (which are sometimes called anonymous revocation systems) to empower service providers with the ability to revoke access from abusive anonymous users. In contrast to revocable anonymity systems, which enable some trusted third party to deanonymize users, anonymous blacklisting systems provide users with a way to authenticate anonymously with a service provider, while enabling the service provider to revoke access from any users that misbehave, without revealing their identities. In this paper, we introduce the anonymous blacklisting
+problem and survey the literature on anonymous blacklisting systems, comparing and contrasting the architecture of various existing schemes, and discussing the tradeoffs inherent in each design. The literature on anonymous blacklisting systems lacks a unified set of definitions; each scheme operates under different trust assumptions and provides different security and privacy guarantees. Therefore, before we discuss the existing approaches in detail, we first propose a formal definition for anonymous blacklisting systems, and a set of security and privacy properties that these systems should possess. We also
outline a set of new performance requirements that anonymous blacklisting systems should satisfy to maximize their potential for real-world adoption, and give formal definitions for several optional features already supported by some schemes in the literature},
keywords = {anonymity, anonymous blacklisting, authentication, privacy enhancing technologies, privacy-enhanced revocation},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Formalizing\%20Anonymous\%20Blacklisting\%20Systems.pdf},
@@ -1502,8 +1489,8 @@ outline a set of new performance requirements that anonymous blacklisting system
pages = {0--240},
school = {University of Colorado},
type = {PhD},
- abstract = {Conventional wisdom dictates that the level of anonymity offered by low latency anonymity networks increases as the user base grows. However, the most significant obstacle to increased adoption of such systems is that their security and performance properties are perceived to be weak. In an effort to help foster adoption, this dissertation aims to better understand and improve security, anonymity, and performance in low latency anonymous communication systems.
-
+ abstract = {Conventional wisdom dictates that the level of anonymity offered by low latency anonymity networks increases as the user base grows. However, the most significant obstacle to increased adoption of such systems is that their security and performance properties are perceived to be weak. In an effort to help foster adoption, this dissertation aims to better understand and improve security, anonymity, and performance in low latency anonymous communication systems.
+
To better understand the security and performance properties of a popular low latency anonymity network, we characterize Tor, focusing on its application protocol distribution, geopolitical client and router distributions, and performance. For instance, we observe that peer-to-peer file sharing protocols use an unfair portion of the network{\textquoteright}s scarce bandwidth. To reduce the congestion produced by bulk downloaders in networks such as Tor, we design, implement, and analyze an anonymizing network tailored specifically for the BitTorrent peer-to-peer file sharing protocol. We next analyze Tor{\textquoteright}s security and anonymity properties and empirically show that Tor is vulnerable to practical end-to-end traffic correlation attacks launched by relatively weak adversaries that inflate their bandwidth claims to attract traffic and thereby compromise key positions on clients{\textquoteright} paths. We also explore the security and performance trade-offs that revolve around path length design decisions and we show that shorter paths offer performance benefits and provide increased resilience to certain attacks. Finally, we discover a source of performance degradation in Tor that results from poor congestion and flow control. To improve Tor{\textquoteright}s performance and grow its user base, we offer a fresh approach to congestion and flow control inspired by techniques from IP and ATM networks},
keywords = {low latency anonymous networks, performance, security},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/kevin-thesis.pdf},
@@ -1546,17 +1533,17 @@ To better understand the security and performance properties of a popular low la
pages = {0--234},
school = {Technische Universit{\"a}t M{\"u}nchen},
address = {Garching bei M{\"u}nchen},
- abstract = { The contribution of this thesis is the study and improvement of secure, decentralized, robust routing algorithms for open networks including ad-hoc networks and peer-to-peer (P2P) overlay networks. The main goals for our secure routing algorithm are openness, efficiency, scalability and resilience to various types of attacks. Common P2P routing algorithms trade-off decentralization for security; for instance by choosing whether or not to require a centralized authority to allow peers to join the network. Other algorithms trade scalability for security, for example employing random search or flooding to prevent certain types of attacks. Our design attempts to meet our security goals in an open system, while limiting the performance penalties incurred.
-
- The first step we took towards designing our routing algorithm was an analysis of the routing algorithm in Freenet. This algorithm is relevant because it achieves efficient (order O(log n)) routing in realistic network topologies in a fully decentralized open network. However, we demonstrate why their algorithm is not secure, as malicious participants are able to severely disrupt the operation of the network. The main difficulty with the Freenet routing algorithm is that for performance it relies on information received from untrusted peers. We also detail a range of proposed solutions, none of which we found to fully fix the problem.
-
- A related problem for efficient routing in sparsely connected networks is the difficulty in sufficiently populating routing tables. One way to improve connectivity in P2P overlay networks is by utilizing modern NAT traversal techniques. We employ a number of standard NAT traversal techniques in our approach, and also developed
-and experimented with a novel method for NAT traversal based on ICMP and UDP hole punching. Unlike other NAT traversal techniques ours does not require a trusted third party.
-
-Another technique we use in our implementation to help address the connectivity problem in sparse networks is the use of distance vector routing in a small local neighborhood. The distance vector variant used in our system employs onion routing to secure the resulting indirect connections. Materially to this design, we discovered a serious vulnerability in the Tor protocol which allowed us to use a DoS attack to reduce the anonymity of the users of this extant anonymizing P2P network. This vulnerability is based on allowing paths of unrestricted length for onion routes through the network. Analyzing Tor and implementing this attack gave us valuable knowledge
-which helped when designing the distance vector routing protocol for our system.
-
- Finally, we present the design of our new secure randomized routing algorithm that does not suffer from the various problems we discovered in previous designs. Goals for the algorithm include providing efficiency and robustness in the presence of malicious participants for an open, fully decentralized network without trusted authorities. We provide a mathematical analysis of the algorithm itself and have created and deployed an implementation of this algorithm in GNUnet. In this thesis we also provide a detailed overview of a distributed
+ abstract = {The contribution of this thesis is the study and improvement of secure, decentralized, robust routing algorithms for open networks including ad-hoc networks and peer-to-peer (P2P) overlay networks. The main goals for our secure routing algorithm are openness, efficiency, scalability and resilience to various types of attacks. Common P2P routing algorithms trade off decentralization for security; for instance, by choosing whether or not to require a centralized authority to allow peers to join the network. Other algorithms trade scalability for security, for example employing random search or flooding to prevent certain types of attacks. Our design attempts to meet our security goals in an open system, while limiting the performance penalties incurred.
+
+ The first step we took towards designing our routing algorithm was an analysis of the routing algorithm in Freenet. This algorithm is relevant because it achieves efficient (order O(log n)) routing in realistic network topologies in a fully decentralized open network. However, we demonstrate why their algorithm is not secure, as malicious participants are able to severely disrupt the operation of the network. The main difficulty with the Freenet routing algorithm is that for performance it relies on information received from untrusted peers. We also detail a range of proposed solutions, none of which we found to fully fix the problem.
+
+ A related problem for efficient routing in sparsely connected networks is the difficulty in sufficiently populating routing tables. One way to improve connectivity in P2P overlay networks is by utilizing modern NAT traversal techniques. We employ a number of standard NAT traversal techniques in our approach, and also developed
+and experimented with a novel method for NAT traversal based on ICMP and UDP hole punching. Unlike other NAT traversal techniques, ours does not require a trusted third party.
+
+Another technique we use in our implementation to help address the connectivity problem in sparse networks is the use of distance vector routing in a small local neighborhood. The distance vector variant used in our system employs onion routing to secure the resulting indirect connections. Materially to this design, we discovered a serious vulnerability in the Tor protocol which allowed us to use a DoS attack to reduce the anonymity of the users of this extant anonymizing P2P network. This vulnerability is based on allowing paths of unrestricted length for onion routes through the network. Analyzing Tor and implementing this attack gave us valuable knowledge
+which helped when designing the distance vector routing protocol for our system.
+
+ Finally, we present the design of our new secure randomized routing algorithm that does not suffer from the various problems we discovered in previous designs. Goals for the algorithm include providing efficiency and robustness in the presence of malicious participants for an open, fully decentralized network without trusted authorities. We provide a mathematical analysis of the algorithm itself and have created and deployed an implementation of this algorithm in GNUnet. In this thesis we also provide a detailed overview of a distributed
emulation framework capable of running a large number of nodes using our full code base as well as some of the challenges encountered in creating and using such a testing framework. We present extensive experimental results showing that our routing algorithm outperforms the dominant DHT design in target topologies, and performs comparably in other scenarios},
keywords = {distributed hash table, Freenet, GNUnet, NAT, R5N, Tor},
isbn = {3-937201-26-2},
@@ -1602,10 +1589,10 @@ emulation framework capable of running a large number of nodes using our full co
pages = {29:1--29:34},
publisher = {ACM},
address = {New York, NY, USA},
- abstract = {Several anonymous authentication schemes allow servers to revoke a misbehaving user{\textquoteright}s future accesses.
-Traditionally, these schemes have relied on powerful Trusted Third Parties (TTPs) capable of deanonymizing
-(or linking) users{\textquoteright} connections. Such TTPs are undesirable because users{\textquoteright} anonymity is not guaranteed, and users must trust them to judge {\textquoteleft}misbehavior{\textquoteright} fairly. Recent schemes such as Blacklistable Anonymous Credentials (BLAC) and Enhanced Privacy ID (EPID) support {\textquotedblleft}privacy-enhanced revocation{\textquotedblright} {\textemdash} servers can revoke misbehaving users without a TTP{\textquoteright}s involvement, and without learning the revoked users{\textquoteright} identities.
-In BLAC and EPID, however, the computation required for authentication at the server is linear in the size (L) of the revocation list, which is impractical as the size approaches thousands of entries. We propose PEREA, a new anonymous authentication scheme for which this bottleneck of computation is independent of the size of the revocation list. Instead, the time complexity of authentication is linear in the size of a revocation window K L, the number of subsequent authentications before which a user{\textquoteright}s misbehavior must be recognized if the user is to be revoked. We extend PEREA to support more complex revocation policies that take the severity of misbehaviors into account. Users can authenticate anonymously if their naughtiness, i.e., the sum of the severities of their blacklisted misbehaviors, is below a certain naughtiness threshold.
+ abstract = {Several anonymous authentication schemes allow servers to revoke a misbehaving user{\textquoteright}s future accesses.
+Traditionally, these schemes have relied on powerful Trusted Third Parties (TTPs) capable of deanonymizing
+(or linking) users{\textquoteright} connections. Such TTPs are undesirable because users{\textquoteright} anonymity is not guaranteed, and users must trust them to judge {\textquoteleft}misbehavior{\textquoteright} fairly. Recent schemes such as Blacklistable Anonymous Credentials (BLAC) and Enhanced Privacy ID (EPID) support {\textquotedblleft}privacy-enhanced revocation{\textquotedblright} {\textemdash} servers can revoke misbehaving users without a TTP{\textquoteright}s involvement, and without learning the revoked users{\textquoteright} identities.
+In BLAC and EPID, however, the computation required for authentication at the server is linear in the size (L) of the revocation list, which is impractical as the size approaches thousands of entries. We propose PEREA, a new anonymous authentication scheme for which this bottleneck of computation is independent of the size of the revocation list. Instead, the time complexity of authentication is linear in the size of a revocation window $K \ll L$, the number of subsequent authentications before which a user{\textquoteright}s misbehavior must be recognized if the user is to be revoked. We extend PEREA to support more complex revocation policies that take the severity of misbehaviors into account. Users can authenticate anonymously if their naughtiness, i.e., the sum of the severities of their blacklisted misbehaviors, is below a certain naughtiness threshold.
We call our extension PEREA-Naughtiness. We prove the security of our constructions, and validate their efficiency as compared to BLAC both analytically and quantitatively},
keywords = {anonymous authentication, anonymous blacklisting, privacy, privacy-enhanced revocation, user misbehavior},
issn = {1094-9224},
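
The revocation-window idea behind the $O(K)$ authentication cost can be sketched without any of PEREA's cryptography (token values and the window size are hypothetical):

K = 10  # revocation window, K << L

def may_authenticate(recent_tokens, blacklist):
    # Only the K most recent single-use tokens are checked, so the cost is
    # O(K) rather than O(L) in the full blacklist size L.
    return not any(t in blacklist for t in recent_tokens[-K:])

tokens = list(range(100))                        # user's past tokens (toy)
print(may_authenticate(tokens, blacklist={5}))   # True: misbehavior too old
print(may_authenticate(tokens, blacklist={95}))  # False: inside the window
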
@@ -1631,7 +1618,7 @@ We call our extension PEREA-Naughtiness. We prove the security of our constructi
year = {2011},
month = aug,
address = {San Francisco, CA, USA},
- abstract = {Existing anonymous communication systems like Tor do not scale well as they require all users to maintain up-to-date information about all available Tor relays in the system. Current proposals for scaling anonymous communication advocate a peer-to-peer (P2P) approach. While the P2P paradigm scales to millions of nodes, it provides new opportunities to compromise anonymity. In this paper, we step away from the P2P paradigm and advocate a client-server approach to scalable anonymity. We propose PIR-Tor, an architecture for the Tor network in which users obtain information about only a few onion routers using private information retrieval techniques. Obtaining information about only a few onion routers is the key to the scalability of our approach, while the use of private retrieval information techniques helps preserve client anonymity. The security of our architecture depends on the security of PIR schemes which are
+ abstract = {Existing anonymous communication systems like Tor do not scale well as they require all users to maintain up-to-date information about all available Tor relays in the system. Current proposals for scaling anonymous communication advocate a peer-to-peer (P2P) approach. While the P2P paradigm scales to millions of nodes, it provides new opportunities to compromise anonymity. In this paper, we step away from the P2P paradigm and advocate a client-server approach to scalable anonymity. We propose PIR-Tor, an architecture for the Tor network in which users obtain information about only a few onion routers using private information retrieval techniques. Obtaining information about only a few onion routers is the key to the scalability of our approach, while the use of private information retrieval techniques helps preserve client anonymity. The security of our architecture depends on the security of PIR schemes, which are
well understood and relatively easy to analyze, as opposed to peer-to-peer designs that require analyzing extremely complex and dynamic systems. In particular, we demonstrate that reasonable parameters of our architecture provide equivalent security to that of the Tor network. Moreover, our experimental results show that the overhead of PIR-Tor is manageable even when the Tor network scales by two orders of magnitude},
keywords = {anonymous communication, peer to peer, PIR-Tor},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/USENIX\%20-\%20PIR-Tor.pdf},
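
The private information retrieval primitive PIR-Tor builds on can be illustrated with the classic two-server XOR scheme (a toy database of equal-length descriptors; this is not PIR-Tor's actual protocol):

import secrets

DB = [b"relay-descriptor-%d" % k for k in range(8)]  # toy records

def server_reply(db, subset):
    # Each server XORs together the records whose indices it was sent.
    out = bytes(len(db[0]))
    for j in subset:
        out = bytes(a ^ b for a, b in zip(out, db[j]))
    return out

i = 5  # desired index, hidden from both (non-colluding) servers
s1 = {j for j in range(len(DB)) if secrets.randbits(1)}
s2 = s1 ^ {i}  # symmetric difference: the subsets differ only at i
r1 = server_reply(DB, s1)
r2 = server_reply(DB, s2)
print(bytes(a ^ b for a, b in zip(r1, r2)))  # == DB[5]
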
@@ -1662,28 +1649,28 @@ well understood and relatively easy to analyze, as opposed to peer-to-peer desig
school = {Technische Universit{\"a}t M{\"u}nchen},
type = {M.S},
address = {Garching bei M{\"u}nchen},
- abstract = {The Invisible Internet Project (I2P) is one of the most widely
-used anonymizing Peer-to-Peer networks on the Internet today. Like
-Tor, it uses onion routing to build tunnels between peers as the basis
-for providing anonymous communication channels. Unlike Tor, I2P
-integrates a range of anonymously hosted services directly with the
-platform. This thesis presents a new attack on the I2P Peer-to-Peer
-network, with the goal of determining the identity of peers that are
-anonymously hosting HTTP (Eepsite) services in the network.
-
-Key design choices made by I2P developers, in particular
-performance-based peer selection, enable a sophisticated adversary
-with modest resources to break key security assumptions. Our attack
-first obtains an estimate of the victim{\textquoteright}s view of the network. Then,
-the adversary selectively targets a small number of peers used by the
-victim with a denial-of-service attack while giving the victim the
-opportunity to replace those peers with other peers that are
-controlled by the adversary. Finally, the adversary performs some
-simple measurements to determine the identity of the peer hosting the
-service.
-
-This thesis provides the necessary background on I2P, gives details on
-the attack --- including experimental data from measurements against the
+ abstract = {The Invisible Internet Project (I2P) is one of the most widely
+used anonymizing Peer-to-Peer networks on the Internet today. Like
+Tor, it uses onion routing to build tunnels between peers as the basis
+for providing anonymous communication channels. Unlike Tor, I2P
+integrates a range of anonymously hosted services directly with the
+platform. This thesis presents a new attack on the I2P Peer-to-Peer
+network, with the goal of determining the identity of peers that are
+anonymously hosting HTTP (Eepsite) services in the network.
+
+Key design choices made by I2P developers, in particular
+performance-based peer selection, enable a sophisticated adversary
+with modest resources to break key security assumptions. Our attack
+first obtains an estimate of the victim{\textquoteright}s view of the network. Then,
+the adversary selectively targets a small number of peers used by the
+victim with a denial-of-service attack while giving the victim the
+opportunity to replace those peers with other peers that are
+controlled by the adversary. Finally, the adversary performs some
+simple measurements to determine the identity of the peer hosting the
+service.
+
+This thesis provides the necessary background on I2P, gives details on
+the attack --- including experimental data from measurements against the
actual I2P network --- and discusses possible solutions},
keywords = {anonymity, attack, denial-of-service, I2P},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/herrmann2011mt.pdf},
@@ -1698,13 +1685,13 @@ actual I2P network --- and discusses possible solutions},
publisher = {Springer Verlag},
organization = {Springer Verlag},
address = {Waterloo, Canada},
- abstract = {I2P is one of the most widely used anonymizing Peer-to-Peer networks on the Internet today. Like Tor, it uses onion routing to build tunnels between peers as the basis for providing anonymous communication channels. Unlike Tor, I2P integrates a range of anonymously hosted services directly with the platform. This paper presents a new attack on the I2P Peer-to-Peer network, with the goal
-of determining the identity of peers that are anonymously hosting HTTP services (Eepsite) in the network.
-
-Key design choices made by I2P developers, in particular
-performance-based peer selection, enable a sophisticated adversary with modest resources to break key security assumptions. Our attack first obtains an estimate of the victim{\textquoteright}s view of the network. Then, the adversary selectively targets a small number of peers used by the
-victim with a denial-of-service attack while giving the victim the opportunity to replace those peers with other peers that are controlled by the adversary. Finally, the adversary performs some simple measurements to determine the identity of the peer hosting the service.
-
+ abstract = {I2P is one of the most widely used anonymizing Peer-to-Peer networks on the Internet today. Like Tor, it uses onion routing to build tunnels between peers as the basis for providing anonymous communication channels. Unlike Tor, I2P integrates a range of anonymously hosted services directly with the platform. This paper presents a new attack on the I2P Peer-to-Peer network, with the goal
+of determining the identity of peers that are anonymously hosting HTTP services (Eepsite) in the network.
+
+Key design choices made by I2P developers, in particular
+performance-based peer selection, enable a sophisticated adversary with modest resources to break key security assumptions. Our attack first obtains an estimate of the victim{\textquoteright}s view of the network. Then, the adversary selectively targets a small number of peers used by the
+victim with a denial-of-service attack while giving the victim the opportunity to replace those peers with other peers that are controlled by the adversary. Finally, the adversary performs some simple measurements to determine the identity of the peer hosting the service.
+
This paper provides the necessary background on I2P, gives details on the attack --- including experimental data from measurements against the actual I2P network --- and discusses possible solutions},
keywords = {anonymity, attack, Guard, I2P, onion routing},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/pet2011i2p.pdf},
@@ -1720,9 +1707,9 @@ This paper provides the necessary background on I2P, gives details on the attack
pages = {357--377},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {In this paper, we address the problem of computing the similarity between two users (according
-to their profiles) while preserving their privacy in a fully decentralized system and for the passive adversary
-model. First, we introduce a two-party protocol for privately computing a threshold version of the similarity and apply it to well-known similarity measures such as the scalar product and the cosine similarity. The output of this protocol is only one bit of information telling whether or not two users are similar beyond a predetermined threshold. Afterwards, we explore the computation of the exact and threshold similarity within the context of differential privacy. Differential privacy is a recent notion developed within the field of private data analysis guaranteeing that an adversary that observes the output of the differentially
+ abstract = {In this paper, we address the problem of computing the similarity between two users (according
+to their profiles) while preserving their privacy in a fully decentralized system and for the passive adversary
+model. First, we introduce a two-party protocol for privately computing a threshold version of the similarity and apply it to well-known similarity measures such as the scalar product and the cosine similarity. The output of this protocol is only one bit of information telling whether or not two users are similar beyond a predetermined threshold. Afterwards, we explore the computation of the exact and threshold similarity within the context of differential privacy. Differential privacy is a recent notion developed within the field of private data analysis guaranteeing that an adversary that observes the output of the differentially
private mechanism will only gain a negligible advantage (up to a privacy parameter) from the presence (or absence) of a particular item in the profile of a user. This provides a strong privacy guarantee that holds independently of the auxiliary knowledge that the adversary might have. More specifically, we design several differentially private variants of the exact and threshold protocols that rely on the addition of random noise tailored to the sensitivity of the considered similarity measure. We also analyze their complexity as well as their impact on the utility of the resulting similarity measure. Finally, we provide experimental results validating the effectiveness of the proposed approach on real datasets},
keywords = {Differential Privacy, homomorphic encryption, privacy, similarity measure},
isbn = {978-3-642-25872-5},
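The noise-addition step this abstract alludes to is, at its core, the standard Laplace mechanism: release the exact value plus noise of scale sensitivity/epsilon. A minimal sketch for plain cosine similarity, leaving out the paper's two-party and homomorphic-encryption machinery:

    import math
    import random

    def laplace_noise(scale):
        # Inverse-CDF sample from Laplace(0, scale).
        u = random.random() - 0.5
        sign = 1.0 if u >= 0 else -1.0
        return -scale * sign * math.log(1.0 - 2.0 * abs(u))

    def dp_cosine_similarity(a, b, epsilon, sensitivity):
        dot = sum(x * y for x, y in zip(a, b))
        norm = math.sqrt(sum(x * x for x in a) * sum(x * x for x in b))
        # Noise calibrated to the similarity measure's sensitivity and epsilon.
        return dot / norm + laplace_noise(sensitivity / epsilon)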
@@ -1760,7 +1747,7 @@ private mechanism, will only gain a negligible advantage (up to a privacy parame
publisher = {IEEE},
organization = {IEEE},
address = {Milan, Italy},
- abstract = {This paper describes a new secure DHT routing algorithm for open,
+ abstract = {This paper describes a new secure DHT routing algorithm for open,
decentralized P2P networks operating in a restricted-route environment with malicious participants. We have implemented our routing algorithm and have evaluated its performance under various topologies and in the presence of malicious peers. For small-world topologies, our algorithm provides significantly better performance when compared to existing methods. In more densely connected topologies, our performance is better than or on par with other designs},
keywords = {distributed hash table, GNUnet, R5N, routing},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/nss2011.pdf},
@@ -1774,9 +1761,9 @@ decentralized P2P networks operating in a restricted-route environment with mali
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {Berlin, Heidelberg},
- abstract = {Differential privacy is a notion that has emerged in the community of statistical databases, as a response to the problem of protecting the privacy of the database{\textquoteright}s participants when performing statistical queries. The idea is that a randomized query satisfies differential privacy if the likelihood of obtaining a certain answer for a database x is not too different from the likelihood of obtaining the same answer on adjacent databases, i.e. databases which differ from x for only one individual. Information flow is an area of Security concerned with the problem of controlling the leakage of confidential information in programs and protocols. Nowadays, one of the most established approaches to quantify and to reason about leakage is based on the R{\'e}nyi min entropy version of information theory.
-
-In this paper, we analyze critically the notion of differential privacy in light of the conceptual framework provided by the R{\'e}nyi min information theory. We show that there is a close relation between differential
+ abstract = {Differential privacy is a notion that has emerged in the community of statistical databases, as a response to the problem of protecting the privacy of the database{\textquoteright}s participants when performing statistical queries. The idea is that a randomized query satisfies differential privacy if the likelihood of obtaining a certain answer for a database x is not too different from the likelihood of obtaining the same answer on adjacent databases, i.e. databases which differ from x for only one individual. Information flow is an area of Security concerned with the problem of controlling the leakage of confidential information in programs and protocols. Nowadays, one of the most established approaches to quantify and to reason about leakage is based on the R{\'e}nyi min entropy version of information theory.
+
+In this paper, we analyze critically the notion of differential privacy in light of the conceptual framework provided by the R{\'e}nyi min information theory. We show that there is a close relation between differential
privacy and leakage, due to the graph symmetries induced by the adjacency relation. Furthermore, we consider the utility of the randomized answer, which measures its expected degree of accuracy. We focus on certain kinds of utility functions called {\textquotedblleft}binary{\textquotedblright}, which have a close correspondence with the R{\'e}nyi min mutual information. Again, it turns out that there can be a tight correspondence between differential privacy and utility, depending on the symmetries induced by the adjacency relation and by the query. Depending on these symmetries we can also build an optimal-utility randomization mechanism while preserving the required level of differential privacy. Our main contribution is a study of the kind of structures that can be induced by the adjacency relation and the query, and how to use them to derive bounds on the leakage and achieve the optimal utility},
isbn = {978-3-642-22011-1},
www_section = {http://dl.acm.org/citation.cfm?id=2027223.2027228},
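For reference, the differential privacy guarantee paraphrased in the abstract above is usually stated as follows, for a randomized mechanism $K$, all adjacent databases $x$ and $x'$ (differing in one individual), and all output sets $S$:

    \Pr[K(x) \in S] \;\le\; e^{\epsilon} \cdot \Pr[K(x') \in S]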
@@ -1848,8 +1835,8 @@ privacy and leakage, due to the graph symmetries induced by the adjacency relati
publisher = {The Internet Society},
organization = {The Internet Society},
address = {San Diego, CA, USA},
- abstract = {We present a cryptographic framework to achieve access control, privacy of social relations, secrecy of resources, and anonymity of users in social networks. We illustrate our technique on a core API for social networking, which
-includes methods for establishing social relations and for sharing resources. The cryptographic protocols implementing these methods use pseudonyms to hide user identities, signatures on these pseudonyms to establish social relations, and zero-knowledge proofs of knowledge of such signatures to demonstrate the existence of social relations without sacrificing user anonymity. As we do not put any constraints on the underlying social network, our framework is generally applicable and, in particular, constitutes an ideal plug-in for decentralized social networks.
+ abstract = {We present a cryptographic framework to achieve access control, privacy of social relations, secrecy of resources, and anonymity of users in social networks. We illustrate our technique on a core API for social networking, which
+includes methods for establishing social relations and for sharing resources. The cryptographic protocols implementing these methods use pseudonyms to hide user identities, signatures on these pseudonyms to establish social relations, and zero-knowledge proofs of knowledge of such signatures to demonstrate the existence of social relations without sacrificing user anonymity. As we do not put any constraints on the underlying social network, our framework is generally applicable and, in particular, constitutes an ideal plug-in for decentralized social networks.
We analyzed the security of our protocols by developing formal definitions of the aforementioned security properties and by verifying them using ProVerif, an automated theorem prover for cryptographic protocols. Finally, we built a prototypical implementation and conducted an experimental evaluation to demonstrate the efficiency and the scalability of our framework},
keywords = {API, online-social-networks, security},
www_section = {http://www.lbs.cs.uni-saarland.de/publications/sapi.pdf},
@@ -1873,7 +1860,7 @@ We analyzed the security of our protocols by developing formal definitions of th
pages = {193--207},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {The pervasiveness of the Internet has led research and applications to focus more and more on their users. Online social networks such as Facebook provide users with the ability to maintain an unprecedented number of social connections. Recommendation systems exploit the opinions of other users to suggest movies or products based on our similarity with them. This shift from machines to users motivates the emergence of novel applications and research challenges.
+ abstract = {The pervasiveness of the Internet has led research and applications to focus more and more on their users. Online social networks such as Facebook provide users with the ability to maintain an unprecedented number of social connections. Recommendation systems exploit the opinions of other users to suggest movies or products based on our similarity with them. This shift from machines to users motivates the emergence of novel applications and research challenges.
In this paper, we embrace the social aspects of Web 2.0 by considering a novel problem. We build a distributed social market that combines interest-based social networks with explicit networks like Facebook. Our Social Market (SM) allows users to identify and build connections to other users that can provide interesting goods or information. At the same time, it backs up these connections with trust, by associating them with paths of trusted users that connect new acquaintances through the explicit network. This convergence of implicit and explicit networks yields TAPS, a novel gossip protocol that can be applied in applications devoted to commercial transactions, or to add robustness to standard gossip applications like dissemination or recommendation systems},
isbn = {978-3-642-24549-7},
doi = {10.1007/978-3-642-24550-3_16},
@@ -1890,9 +1877,9 @@ In this paper, we embrace the social aspects of the Web 2.0 by considering a nov
publisher = {ACM},
organization = {ACM},
address = {Chicago, IL, United States},
- abstract = {Anonymity systems such as Tor aim to enable users to communicate in a manner that is untraceable by adversaries that control a small number of machines. To provide efficient service to users, these anonymity systems make full use of forwarding capacity when sending traffic between intermediate relays. In this paper, we show that doing this leaks information about the set of Tor relays in a circuit (path). We present attacks that, with high confidence and based solely on throughput information, can (a) reduce the attacker{\textquoteright}s uncertainty about the bottleneck relay of any Tor circuit whose throughput can be observed, (b) exactly identify the guard relay(s) of a Tor user when circuit throughput can be observed over multiple connections, and (c) identify whether two concurrent TCP connections belong to the same Tor user, breaking unlinkability. Our attacks are stealthy, and cannot be readily detected by a user or by Tor relays. We validate our attacks using experiments over the live Tor network. We find that the attacker can substantially reduce the entropy of a bottleneck relay distribution of a Tor circuit whose throughput can be observed{\textemdash}the entropy gets
-reduced by a factor of 2 in the median case.
-Such information leaks from a single Tor circuit can be combined over multiple connections to exactly identify a user{\textquoteright}s guard relay(s). Finally, we are also able to link two connections from the same initiator with a crossover error rate of less
+ abstract = {Anonymity systems such as Tor aim to enable users to communicate in a manner that is untraceable by adversaries that control a small number of machines. To provide efficient service to users, these anonymity systems make full use of forwarding capacity when sending traffic between intermediate relays. In this paper, we show that doing this leaks information about the set of Tor relays in a circuit (path). We present attacks that, with high confidence and based solely on throughput information, can (a) reduce the attacker{\textquoteright}s uncertainty about the bottleneck relay of any Tor circuit whose throughput can be observed, (b) exactly identify the guard relay(s) of a Tor user when circuit throughput can be observed over multiple connections, and (c) identify whether two concurrent TCP connections belong to the same Tor user, breaking unlinkability. Our attacks are stealthy, and cannot be readily detected by a user or by Tor relays. We validate our attacks using experiments over the live Tor network. We find that the attacker can substantially reduce the entropy of a bottleneck relay distribution of a Tor circuit whose throughput can be observed{\textemdash}the entropy gets
+reduced by a factor of 2 in the median case.
+Such information leaks from a single Tor circuit can be combined over multiple connections to exactly identify a user{\textquoteright}s guard relay(s). Finally, we are also able to link two connections from the same initiator with a crossover error rate of less
than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fewer resources than previous attacks on Tor},
keywords = {anonymity, attacks, throughput},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CCS\%2711\%20-\%20Throughput-fingerprinting.pdf},
@@ -1905,8 +1892,8 @@ than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fe
year = {2011},
month = feb,
address = {San Diego, CA, USA},
- abstract = {Flow watermarks are active traffic analysis techniques that help establish a causal connection between two network flows by content-independent manipulations, e.g., altering packet timings. Watermarks provide a much more
-scalable approach for flow correlation than passive traffic analysis. Previous designs of scalable watermarks, however, were subject to multi-flow attacks. They also introduced delays too large to be used in most environments. We design SWIRL, a Scalable Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first watermark that is practical to use for large-scale traffic analysis. SWIRL uses a flow-dependent approach to resist multi-flow
+ abstract = {Flow watermarks are active traffic analysis techniques that help establish a causal connection between two network flows by content-independent manipulations, e.g., altering packet timings. Watermarks provide a much more
+scalable approach for flow correlation than passive traffic analysis. Previous designs of scalable watermarks, however, were subject to multi-flow attacks. They also introduced delays too large to be used in most environments. We design SWIRL, a Scalable Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first watermark that is practical to use for large-scale traffic analysis. SWIRL uses a flow-dependent approach to resist multi-flow
attacks, marking each flow with a different pattern. SWIRL is robust to packet losses and network jitter, yet it introduces only small delays that are invisible to both benign users and determined adversaries. We analyze the performance of SWIRL both analytically and on the PlanetLab testbed, demonstrating very low error rates. We consider applications of SWIRL to stepping stone detection and linking anonymous communication. We also propose a novel application of watermarks to defend against congestion attacks on Tor},
keywords = {anonymity, SWIRL, traffic analysis, watermarking},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/NDSS11-2.pdf},
@@ -1919,7 +1906,7 @@ attacks, marking each flow with a different pattern. SWIRL is robust to packet l
year = {2011},
month = aug,
address = {San Francisco, CA, USA},
- abstract = {In this paper, we present Telex, a new approach to resisting state-level Internet censorship. Rather than attempting to win the cat-and-mouse game of finding open proxies, we leverage censors{\textquoteright} unwillingness to completely block day-to-day Internet access. In effect, Telex converts innocuous, unblocked websites into proxies, without their explicit collaboration. We envision that friendly ISPs would deploy Telex stations on paths between censors{\textquoteright} networks and popular, uncensored Internet destinations. Telex stations would monitor seemingly innocuous flows for a special {\textquotedblleft}tag{\textquotedblright} and transparently divert them to a forbidden website or service instead. We propose a new cryptographic scheme based on elliptic curves for tagging TLS handshakes such that the tag is visible to a Telex
+ abstract = {In this paper, we present Telex, a new approach to resisting state-level Internet censorship. Rather than attempting to win the cat-and-mouse game of finding open proxies, we leverage censors{\textquoteright} unwillingness to completely block day-to-day Internet access. In effect, Telex converts innocuous, unblocked websites into proxies, without their explicit collaboration. We envision that friendly ISPs would deploy Telex stations on paths between censors{\textquoteright} networks and popular, uncensored Internet destinations. Telex stations would monitor seemingly innocuous flows for a special {\textquotedblleft}tag{\textquotedblright} and transparently divert them to a forbidden website or service instead. We propose a new cryptographic scheme based on elliptic curves for tagging TLS handshakes such that the tag is visible to a Telex
station but not to a censor. In addition, we use our tagging scheme to build a protocol that allows clients to connect to Telex stations while resisting both passive and active attacks. We also present a proof-of-concept implementation that demonstrates the feasibility of our system},
keywords = {anticensorship, network infrastructure state-level censorship, proxy, telex},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Telex\%3A\%20Anticensorship\%20in\%20the\%20Network\%20Infrastructure.pdf},
@@ -1966,9 +1953,9 @@ station but not to a censor. In addition, we use our tagging scheme to build a p
publisher = {ACM},
organization = {ACM},
address = {Chicago, IL, United States},
- abstract = {Low-latency anonymization networks such as Tor and JAP claim to hide the recipient and the content of communications from a local observer, i.e., an entity that can eavesdrop the traffic between the user and the first anonymization node. Especially users in totalitarian regimes strongly depend on such networks to freely communicate. For these people, anonymity is particularly important and an analysis of the anonymization methods against various attacks is necessary to ensure adequate protection. In this paper we show that anonymity in Tor and JAP is not as strong as expected so far and cannot resist website fingerprinting attacks under certain circumstances. We first define features for website
-fingerprinting solely based on volume, time, and direction of the traffic. As a result, the subsequent classification becomes much easier. We apply support vector machines with the introduced features. We are able to improve recognition
-results of existing works on a given state-of-the-art dataset in Tor from 3\% to 55\% and in JAP from 20\% to 80\%. The datasets assume a closed world with only 775 websites. As a next step, we transfer our findings to a more complex and realistic open-world scenario, i.e., recognition of several websites in a set of thousands of random unknown websites. To the best of our knowledge, this work is the first successful attack in the open-world scenario. We achieve a surprisingly high true positive rate of up to 73\% for a false positive rate of 0.05\%. Finally, we show preliminary results of a proof-of-concept implementation that applies camouflage as a countermeasure to hamper the fingerprinting attack. For
+ abstract = {Low-latency anonymization networks such as Tor and JAP claim to hide the recipient and the content of communications from a local observer, i.e., an entity that can eavesdrop the traffic between the user and the first anonymization node. Especially users in totalitarian regimes strongly depend on such networks to freely communicate. For these people, anonymity is particularly important and an analysis of the anonymization methods against various attacks is necessary to ensure adequate protection. In this paper we show that anonymity in Tor and JAP is not as strong as expected so far and cannot resist website fingerprinting attacks under certain circumstances. We first define features for website
+fingerprinting solely based on volume, time, and direction of the traffic. As a result, the subsequent classification becomes much easier. We apply support vector machines with the introduced features. We are able to improve recognition
+results of existing works on a given state-of-the-art dataset in Tor from 3\% to 55\% and in JAP from 20\% to 80\%. The datasets assume a closed world with only 775 websites. As a next step, we transfer our findings to a more complex and realistic open-world scenario, i.e., recognition of several websites in a set of thousands of random unknown websites. To the best of our knowledge, this work is the first successful attack in the open-world scenario. We achieve a surprisingly high true positive rate of up to 73\% for a false positive rate of 0.05\%. Finally, we show preliminary results of a proof-of-concept implementation that applies camouflage as a countermeasure to hamper the fingerprinting attack. For
JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from 55\% to about 3\%},
keywords = {anonymous communication, pattern recognition, privacy, traffic analysis, website fingerprinting},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/WPES\%2711\%20-\%20Fingerprinting.pdf},
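The classification step described above amounts to mapping each recorded page load to a coarse feature vector and training a support vector machine on labeled traces. A toy scikit-learn sketch; the two-site closed world and the four features are illustrative stand-ins for the paper's volume/time/direction feature set:

    from sklearn.svm import SVC

    def features(trace):
        # trace: signed packet sizes (positive = outgoing, negative = incoming).
        out = [p for p in trace if p > 0]
        inc = [-p for p in trace if p < 0]
        return [len(out), len(inc), sum(out), sum(inc)]

    # Toy closed world: two sites with distinctive traffic shapes.
    traces = [[600, -1500, -1500, 80], [580, -1500, -1400, 90],
              [300, -400, 50, -300], [310, -420, 60, -280]]
    labels = ["siteA", "siteA", "siteB", "siteB"]

    clf = SVC(kernel="rbf")
    clf.fit([features(t) for t in traces], labels)
    print(clf.predict([features([590, -1480, -1450, 85])]))  # expected: siteA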
@@ -2008,7 +1995,7 @@ JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from 55\
booktitle = {Security and Privacy (SP), 2011 IEEE Symposium on},
year = {2011},
month = {May},
- abstract = {Many commercial websites use recommender systems to help customers locate products and content. Modern recommenders are based on collaborative filtering: they use patterns learned from users{\textquoteright} behavior to make recommendations, usually in the form of related-items lists. The scale and complexity of these systems, along with the fact that their outputs reveal only relationships between items (as opposed to information about users), may suggest that they pose no meaningful privacy risk.
+ abstract = {Many commercial websites use recommender systems to help customers locate products and content. Modern recommenders are based on collaborative filtering: they use patterns learned from users{\textquoteright} behavior to make recommendations, usually in the form of related-items lists. The scale and complexity of these systems, along with the fact that their outputs reveal only relationships between items (as opposed to information about users), may suggest that they pose no meaningful privacy risk.
In this paper, we develop algorithms which take a moderate amount of auxiliary information about a customer and infer this customer{\textquoteright}s transactions from temporal changes in the public outputs of a recommender system. Our inference attacks are passive and can be carried out by any Internet user. We evaluate their feasibility using public data from popular websites Hunch, Last.fm, LibraryThing, and Amazon},
keywords = {accuracy, Amazon, collaboration, collaborative filtering, commercial Web sites, consumer behaviour, Covariance matrix, customer transactions, data privacy, groupware, History, Hunch, Inference algorithms, inference attacks, inference mechanisms, information filtering, Internet, Internet user, Last.fm, Library Thing, privacy, privacy risks, recommender systems, Web sites},
doi = {10.1109/SP.2011.40},
@@ -2058,7 +2045,7 @@ In this paper, we develop algorithms which take a moderate amount of auxiliary i
publisher = {IEEE},
organization = {IEEE},
address = {Delft, The Netherlands},
- abstract = {Traditional NAT traversal methods require the help of a third party for signalling. This paper investigates a new autonomous
+ abstract = {Traditional NAT traversal methods require the help of a third party for signalling. This paper investigates a new autonomous
method for establishing connections to peers behind NAT. The proposed method for Autonomous NAT traversal uses fake ICMP messages to initially contact the NATed peer. This paper presents how the method is supposed to work in theory, discusses some possible variations, introduces various concrete implementations of the proposed approach and evaluates empirical results of a measurement study designed to evaluate the efficacy of the idea in practice},
keywords = {GNUnet, ICMP, NAT, P2P},
www_section = {http://grothoff.org/christian/pwnat.pdf},
@@ -2112,10 +2099,10 @@ method for establishing connections to peers behind NAT. The proposed method fo
title = {Cryptographic Extraction and Key Derivation: The HKDF Scheme},
year = {2010},
note = {\url{http://eprint.iacr.org/}},
- abstract = {In spite of the central role of key derivation functions (KDF) in applied cryptography, there has been little formal work addressing the design and analysis of general multi-purpose KDFs. In practice, most KDFs (including those widely standardized) follow ad-hoc approaches that treat cryptographic hash functions as perfectly random functions. In this paper we close some gaps between theory and practice by contributing to the study and engineering of KDFs in several ways. We provide detailed rationale for the design of KDFs based on the extract-then-expand approach; we present the first general and rigorous definition of KDFs and their security which we base on the notion of computational extractors; we specify a concrete fully practical KDF based on the HMAC construction; and we provide an analysis of this construction based on the extraction and pseudorandom properties of HMAC. The resultant KDF design can support a large variety of KDF applications under suitable assumptions on the underlying hash function; particular attention and effort is devoted to minimizing these assumptions as much as possible for each usage scenario.
-
-Beyond the theoretical interest in modeling KDFs, this work is intended to address two important and timely needs of cryptographic applications: (i) providing a single hash-based KDF design that can be standardized for use in multiple and diverse applications, and (ii) providing a conservative, yet efficient, design that exercises much care in the way it utilizes a cryptographic hash function.
-
+ abstract = {In spite of the central role of key derivation functions (KDF) in applied cryptography, there has been little formal work addressing the design and analysis of general multi-purpose KDFs. In practice, most KDFs (including those widely standardized) follow ad-hoc approaches that treat cryptographic hash functions as perfectly random functions. In this paper we close some gaps between theory and practice by contributing to the study and engineering of KDFs in several ways. We provide detailed rationale for the design of KDFs based on the extract-then-expand approach; we present the first general and rigorous definition of KDFs and their security which we base on the notion of computational extractors; we specify a concrete fully practical KDF based on the HMAC construction; and we provide an analysis of this construction based on the extraction and pseudorandom properties of HMAC. The resultant KDF design can support a large variety of KDF applications under suitable assumptions on the underlying hash function; particular attention and effort is devoted to minimizing these assumptions as much as possible for each usage scenario.
+
+Beyond the theoretical interest in modeling KDFs, this work is intended to address two important and timely needs of cryptographic applications: (i) providing a single hash-based KDF design that can be standardized for use in multiple and diverse applications, and (ii) providing a conservative, yet efficient, design that exercises much care in the way it utilizes a cryptographic hash function.
+
(The HMAC-based scheme presented here, named HKDF, is being standardized by the IETF.)},
keywords = {GNUnet, HKDF, HMAC, key derivation},
www_section = {http://eprint.iacr.org/2010/264},
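The extract-then-expand construction is compact enough to sketch directly. The following follows the HKDF description (as later standardized in RFC 5869), instantiated with SHA-256 for illustration:

    import hashlib
    import hmac

    def hkdf_extract(salt, ikm):
        # Extract: concentrate possibly non-uniform keying material into a PRK.
        return hmac.new(salt, ikm, hashlib.sha256).digest()

    def hkdf_expand(prk, info, length):
        # Expand: stretch the PRK into `length` bytes of output keying material.
        okm, block, counter = b"", b"", 1
        while len(okm) < length:
            block = hmac.new(prk, block + info + bytes([counter]),
                             hashlib.sha256).digest()
            okm += block
            counter += 1
        return okm[:length]

    prk = hkdf_extract(b"some salt", b"input keying material")
    key = hkdf_expand(prk, b"application context", 32)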
@@ -2190,8 +2177,8 @@ Beyond the theoretical interest in modeling KDFs, this work is intended to addre
year = {2010},
month = apr,
address = {Atlanta, Georgia, USA},
- abstract = {We present a new solution to protect the widely deployed KAD DHT against localized attacks which can take control over DHT entries. We show through measurements that the ID distribution of the best peers found after a lookup
-process follows a geometric distribution. We then use this result to detect DHT attacks by comparing real peers{\textquoteright} ID distributions to the theoretical one using the Kullback-Leibler divergence. When an attack is detected, we propose countermeasures that progressively remove suspicious peers from the list of possible contacts to provide safe DHT access. Evaluations show that our
+ abstract = {We present a new solution to protect the widely deployed KAD DHT against localized attacks which can take control over DHT entries. We show through measurements that the ID distribution of the best peers found after a lookup
+process follows a geometric distribution. We then use this result to detect DHT attacks by comparing real peers{\textquoteright} ID distributions to the theoretical one using the Kullback-Leibler divergence. When an attack is detected, we propose countermeasures that progressively remove suspicious peers from the list of possible contacts to provide safe DHT access. Evaluations show that our
method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead},
keywords = {attack detection, attack mitigation, distributed hash table, IDs distribution, KAD, Sybil attack},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf},
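The detection test reduces to measuring how far the observed ID distribution strays from the geometric reference, using the Kullback-Leibler divergence. A minimal sketch; the distribution parameter, sample values, and threshold are illustrative, not the paper's:

    import math

    def kl_divergence(p, q):
        # D(P||Q) = sum p_i * log(p_i / q_i); large values flag anomalies.
        return sum(pi * math.log(pi / qi) for pi, qi in zip(p, q) if pi > 0)

    def geometric_reference(n, rho=0.5):
        pmf = [(1 - rho) ** (k - 1) * rho for k in range(1, n + 1)]
        total = sum(pmf)
        return [x / total for x in pmf]   # normalize the truncated support

    # A distribution skewed toward very close IDs, as after a localized attack.
    observed = [0.95, 0.03, 0.01, 0.01]
    if kl_divergence(observed, geometric_reference(len(observed))) > 0.1:
        print("deviation from the geometric model: possible attack")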
@@ -2248,9 +2235,9 @@ method detects the most efficient attacks with a very small false-negative rate,
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {Search and recommendation systems must effectively model user interests in order to provide personalized results. The proliferation of social software makes social networks an increasingly important source for user interest modeling,
-because of the social influence and correlation among friends. However, there are large variations in people{\textquoteright}s contribution of social content. Therefore, it is impractical to accurately model interests for all users. As a result, applications need to decide whether to utilize a user interest model based on its
-accuracy. To address this challenge, we present a study on the accuracy of user interests inferred from three types of social content: social bookmarking, file sharing, and electronic communication, in an organizational social network within a large-scale enterprise. First, we demonstrate that combining different types of social content to infer user interests
+ abstract = {Search and recommendation systems must effectively model user interests in order to provide personalized results. The proliferation of social software makes social networks an increasingly important source for user interest modeling,
+because of the social influence and correlation among friends. However, there are large variations in people{\textquoteright}s contribution of social content. Therefore, it is impractical to accurately model interests for all users. As a result, applications need to decide whether to utilize a user interest model based on its
+accuracy. To address this challenge, we present a study on the accuracy of user interests inferred from three types of social content: social bookmarking, file sharing, and electronic communication, in an organizational social network within a large-scale enterprise. First, we demonstrate that combining different types of social content to infer user interests
outperforms methods that use only one type of social content. Second, we present a technique to predict the inference accuracy based on easily observed network characteristics, including user activeness, network in-degree, out-degree, and betweenness centrality},
keywords = {accuracy, social networks, user modeling},
isbn = {978-1-60558-799-8},
@@ -2493,7 +2480,7 @@ outperforms methods that use only one type of social content. Second, we present
publisher = {Springer Berlin, Heidelberg},
organization = {Springer Berlin, Heidelberg},
address = {Essen, Germany},
- abstract = {The presentation of a landmark paper by Chu et al. at SIGMETRICS 2000 introduced application layer multicast (ALM) as a completely new area of network research. Many researchers have since proposed ALM protocols, and have shown that these protocols only put a small burden on the network in terms of link-stress and -stretch. However, since the network is typically not a bottleneck, user acceptance remains the limiting factor for the deployment of ALM. In this paper we present an in-depth study of the user-perceived performance of the NICE ALM protocol. We use the OverSim simulation framework to evaluate the delay experienced by a user and the bandwidth consumption on the user{\textquoteright}s access link in large multicast groups and under aggressive churn models. Our major results are (1) latencies grow moderately with an increasing number of nodes as clusters get optimized, (2) join delays get optimized over time, and (3) despite being a tree-dissemination protocol, NICE handles churn surprisingly well when adjusting heartbeat intervals accordingly. We conclude that NICE comes up to the user{\textquoteright}s expectations even for large groups and under high churn.
+ abstract = {The presentation of a landmark paper by Chu et al. at SIGMETRICS 2000 introduced application layer multicast (ALM) as a completely new area of network research. Many researchers have since proposed ALM protocols, and have shown that these protocols only put a small burden on the network in terms of link-stress and -stretch. However, since the network is typically not a bottleneck, user acceptance remains the limiting factor for the deployment of ALM. In this paper we present an in-depth study of the user-perceived performance of the NICE ALM protocol. We use the OverSim simulation framework to evaluate the delay experienced by a user and the bandwidth consumption on the user{\textquoteright}s access link in large multicast groups and under aggressive churn models. Our major results are (1) latencies grow moderately with an increasing number of nodes as clusters get optimized, (2) join delays get optimized over time, and (3) despite being a tree-dissemination protocol, NICE handles churn surprisingly well when adjusting heartbeat intervals accordingly. We conclude that NICE comes up to the user{\textquoteright}s expectations even for large groups and under high churn.
This work was partially funded as part of the Spontaneous Virtual Networks (SpoVNet) project by the Landesstiftung Baden-W{\"u}rttemberg within the BW-FIT program and as part of the Young Investigator Group Controlling Heterogeneous and Dynamic Mobile Grid and Peer-to-Peer Systems (CoMoGriP) by the Concept for the Future of Karlsruhe Institute of Technology (KIT) within the framework of the German Excellence Initiative},
isbn = {978-3-642-12103-6},
doi = {10.1007/978-3-642-12104-3},
@@ -2517,8 +2504,8 @@ This work was partially funded as part of the Spontaneous Virtual Networks (SpoV
pages = {380--389},
publisher = {ACM},
organization = {ACM},
- abstract = {Tor is an anonymous communications network with thousands of router nodes worldwide. An intuition reflected in much of the literature on anonymous communications is that, as an anonymity network grows, it becomes more secure against a given observer because the observer will see less of the network. In particular, as the Tor network grows from volunteers operating relays all over the world, it becomes less and less likely for a single autonomous system (AS) to be able to observe both ends of an anonymous connection. Yet, as the network continues to grow significantly, no analysis has been done to determine if this intuition is correct. Further, modifications to Tor{\textquoteright}s path selection algorithm to help clients avoid an AS-level observer have not been proposed and analyzed.
-
+ abstract = {Tor is an anonymous communications network with thousands of router nodes worldwide. An intuition reflected in much of the literature on anonymous communications is that, as an anonymity network grows, it becomes more secure against a given observer because the observer will see less of the network. In particular, as the Tor network grows from volunteers operating relays all over the world, it becomes less and less likely for a single autonomous system (AS) to be able to observe both ends of an anonymous connection. Yet, as the network continues to grow significantly, no analysis has been done to determine if this intuition is correct. Further, modifications to Tor{\textquoteright}s path selection algorithm to help clients avoid an AS-level observer have not been proposed and analyzed.
+
Five years ago a previous study examined the AS-level threat against client and destination addresses chosen a priori to be likely or interesting to examine. Using an AS-level path inference algorithm with improved accuracy, more extensive Internet routing data, and, most importantly, a model of typical Tor client AS-level sources and destinations based on data gathered from the live network, we demonstrate that the threat of a single AS observing both ends of an anonymous Tor connection is greater than previously thought. We look at the growth of the Tor network over the past five years and show that its explosive growth has had only a small impact on the network{\textquoteright}s robustness against an AS-level attacker. Finally, we propose and evaluate the effectiveness of some simple, AS-aware path selection algorithms that avoid the computational overhead imposed by full AS-level path inference algorithms. Our results indicate that a novel heuristic we propose is more effective against an AS-level observer than other commonly proposed heuristics for improving location diversity in path selection},
keywords = {anonymity, autonomous systems, privacy, Tor},
isbn = {978-1-60558-894-0},
@@ -2647,10 +2634,10 @@ Five years ago a previous study examined the AS-level threat against client and
pages = {173--187},
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
- abstract = {Operators of online social networks are increasingly sharing potentially sensitive information about users and their relationships with advertisers, application developers, and data-mining researchers. Privacy is typically protected by anonymization, i.e., removing names, addresses, etc.
-
-We present a framework for analyzing privacy and anonymity in social networks and develop a new re-identification algorithm targeting anonymized social-network graphs. To demonstrate its effectiveness on real-world networks, we show that a third of the users who can be verified to have accounts on both Twitter, a popular microblogging service, and Flickr, an online photo-sharing site, can be re-identified in the anonymous Twitter graph with only a 12\% error rate.
-
+ abstract = {Operators of online social networks are increasingly sharing potentially sensitive information about users and their relationships with advertisers, application developers, and data-mining researchers. Privacy is typically protected by anonymization, i.e., removing names, addresses, etc.
+
+We present a framework for analyzing privacy and anonymity in social networks and develop a new re-identification algorithm targeting anonymized social-network graphs. To demonstrate its effectiveness on real-world networks, we show that a third of the users who can be verified to have accounts on both Twitter, a popular microblogging service, and Flickr, an online photo-sharing site, can be re-identified in the anonymous Twitter graph with only a 12\% error rate.
+
Our de-anonymization algorithm is based purely on the network topology, does not require creation of a large number of dummy "sybil" nodes, is robust to noise and all existing defenses, and works even when the overlap between the target network and the adversary{\textquoteright}s auxiliary information is small},
keywords = {anonymity, network topology, privacy},
isbn = {978-0-7695-3633-0},
@@ -2682,12 +2669,12 @@ Our de-anonymization algorithm is based purely on the network topology, does not
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {We consider the problem of producing recommendations from collective user behavior while simultaneously providing guarantees of privacy for these users. Specifically, we consider the Netflix Prize data set, and its leading algorithms, adapted to the framework of differential privacy.
-
-Unlike prior privacy work concerned with cryptographically securing the computation of recommendations, differential privacy constrains a computation in a way that precludes any inference about the underlying records from its output. Such algorithms necessarily introduce uncertainty--i.e., noise--to computations, trading accuracy for privacy.
-
-We find that several of the leading approaches in the Netflix Prize competition can be adapted to provide differential privacy, without significantly degrading their accuracy. To adapt these algorithms, we explicitly factor them into two parts, an aggregation/learning phase that can be performed with differential privacy guarantees, and an individual recommendation phase that uses the learned correlations and an individual{\textquoteright}s data to provide personalized recommendations. The adaptations are non-trivial, and involve both careful analysis of the per-record sensitivity of the algorithms to calibrate noise, as well as new post-processing steps to mitigate the impact of this noise.
-
+ abstract = {We consider the problem of producing recommendations from collective user behavior while simultaneously providing guarantees of privacy for these users. Specifically, we consider the Netflix Prize data set, and its leading algorithms, adapted to the framework of differential privacy.
+
+Unlike prior privacy work concerned with cryptographically securing the computation of recommendations, differential privacy constrains a computation in a way that precludes any inference about the underlying records from its output. Such algorithms necessarily introduce uncertainty--i.e., noise--to computations, trading accuracy for privacy.
+
+We find that several of the leading approaches in the Netflix Prize competition can be adapted to provide differential privacy, without significantly degrading their accuracy. To adapt these algorithms, we explicitly factor them into two parts, an aggregation/learning phase that can be performed with differential privacy guarantees, and an individual recommendation phase that uses the learned correlations and an individual{\textquoteright}s data to provide personalized recommendations. The adaptations are non-trivial, and involve both careful analysis of the per-record sensitivity of the algorithms to calibrate noise, as well as new post-processing steps to mitigate the impact of this noise.
+
We measure the empirical trade-off between accuracy and privacy in these adaptations, and find that we can provide non-trivial formal privacy guarantees while still outperforming the Cinematch baseline Netflix provides},
keywords = {Differential Privacy, Netflix, recommender systems},
isbn = {978-1-60558-495-9},
@@ -2779,8 +2766,8 @@ We measure the empirical trade-off between accuracy and privacy in these adaptat
publisher = {Springer-Verlag New York, Inc},
organization = {Springer-Verlag New York, Inc},
address = {New York, NY, USA},
- abstract = {Gossip-based information dissemination protocols are considered easy to deploy, scalable and resilient to network dynamics. Load-balancing is inherent in these protocols as the dissemination work is evenly spread among all nodes. Yet, large-scale distributed systems are usually heterogeneous with respect to network capabilities such as bandwidth. In practice, a blind load-balancing strategy might significantly hamper the performance of the gossip dissemination.
-
+ abstract = {Gossip-based information dissemination protocols are considered easy to deploy, scalable and resilient to network dynamics. Load-balancing is inherent in these protocols as the dissemination work is evenly spread among all nodes. Yet, large-scale distributed systems are usually heterogeneous with respect to network capabilities such as bandwidth. In practice, a blind load-balancing strategy might significantly hamper the performance of the gossip dissemination.
+
This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynamically adapt their contribution to the gossip dissemination according to their bandwidth capabilities. Using a continuous, itself gossip-based, approximation of relative bandwidth capabilities, HEAP dynamically leverages the most capable nodes by increasing their fanout, while decreasing by the same proportion that of less capable nodes. HEAP preserves the simple and proactive (churn adaptation) nature of gossip, while significantly improving its effectiveness. We extensively evaluate HEAP in the context of a video streaming application on a testbed of 270 PlanetLab nodes. Our results show that HEAP significantly improves the quality of the streaming over standard homogeneous gossip protocols, especially when the stream rate is close to the average available bandwidth},
keywords = {heterogeneity, load balancing},
www_section = {http://portal.acm.org/citation.cfm?id=1656984$\#$},
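HEAP's adaptation rule boils down to scaling each node's fanout by its bandwidth relative to the (gossip-estimated) average, so the aggregate load matches that of homogeneous gossip. A minimal sketch; the bounds and figures are illustrative:

    def heap_fanout(base_fanout, my_bandwidth, avg_bandwidth, lo=1, hi=30):
        # Capable nodes take proportionally more of the dissemination work,
        # less capable nodes proportionally less, clamped to sane bounds.
        f = round(base_fanout * my_bandwidth / avg_bandwidth)
        return max(lo, min(hi, f))

    print(heap_fanout(7, 4000, 1000))   # a 4x-capable node forwards to 28 peers
    print(heap_fanout(7, 250, 1000))    # a 0.25x node forwards to only 2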
@@ -2856,13 +2843,13 @@ This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynam
school = {Maastricht University},
type = {Master Thesis},
address = {Maastricht, Netherlands},
- abstract = {Modern board games present a new and challenging field when researching
-search techniques in the field of Artificial Intelligence. These games differ from
-classic board games, such as chess, in that they can be non-deterministic, have imperfect information, or involve more than two players. While tree-search approaches, such as alpha-beta pruning, have been quite successful in playing classic board games, by, for instance, defeating the then-reigning world champion Garry Kasparov in chess, these techniques are not as effective when applied to modern board games.
-This thesis investigates the effectiveness of Monte-Carlo Tree Search when applied to a modern board game, for which the board game Thurn and Taxis was used. This is a non-deterministic modern board game with imperfect information that can be played with more than two players, and is hence suitable for research. First, the state-space and game-tree complexities of this game are computed, from which the conclusion can be drawn that the two-player version of the game has a complexity similar to the game Shogi. Several techniques are investigated in order to improve the sampling process, for instance by adding domain knowledge.
-Given the results of the experiments, one can conclude that Monte-Carlo Tree
-Search gives a slight performance increase over standard Monte-Carlo search.
-In addition, the most effective improvements appeared to be the application of
+ abstract = {Modern board games present a new and challenging field when researching
+search techniques in the field of Artificial Intelligence. These games differ from
+classic board games, such as chess, in that they can be non-deterministic, have imperfect information, or involve more than two players. While tree-search approaches, such as alpha-beta pruning, have been quite successful in playing classic board games, by, for instance, defeating the then-reigning world champion Garry Kasparov in chess, these techniques are not as effective when applied to modern board games.
+This thesis investigates the effectiveness of Monte-Carlo Tree Search when applied to a modern board game, for which the board game Thurn and Taxis was used. This is a non-deterministic modern board game with imperfect information that can be played with more than two players, and is hence suitable for research. First, the state-space and game-tree complexities of this game are computed, from which the conclusion can be drawn that the two-player version of the game has a complexity similar to the game Shogi. Several techniques are investigated in order to improve the sampling process, for instance by adding domain knowledge.
+Given the results of the experiments, one can conclude that Monte-Carlo Tree
+Search gives a slight performance increase over standard Monte-Carlo search.
+In addition, the most effective improvements appeared to be the application of
pseudo-random simulations and limiting simulation lengths, while other techniques have been shown to be less effective or even ineffective. Overall, when applying the best performing techniques, an AI with advanced playing strength has been created, such that further research is likely to push this performance to a strength of expert level},
keywords = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search, search techniques},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Thesis\%20-\%20F.Schadd.pdf},
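For reference, the tree-selection step of Monte-Carlo Tree Search commonly uses the UCT rule, which balances the sampled win rate of a child against an exploration bonus:

    a^{*} = \arg\max_{a} \left( \frac{w_a}{n_a} + c \sqrt{\frac{\ln N}{n_a}} \right)

where $w_a$ and $n_a$ are the wins and visits recorded for child $a$, $N$ is the parent's visit count, and $c$ is an exploration constant.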
@@ -2909,7 +2896,7 @@ pseudo-random simulations and limiting simulation lengths, while other technique
pages = {1--18},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {We address one of the foundational problems in cryptography: the bias of coin-flipping protocols. Coin-flipping protocols allow mutually distrustful parties to generate a common unbiased random bit, guaranteeing that even if one of the parties is malicious, it cannot significantly bias the output of the honest party. A classical result by Cleve [STOC {\textquoteright}86] showed that for any two-party $r$-round coin-flipping protocol there exists an efficient adversary that can bias the output of the honest party by $\Omega(1/r)$. However, the best previously known protocol only guarantees $O(1/\sqrt{r})$ bias, and the question of whether Cleve{\textquoteright}s bound is tight has remained open for more than twenty years.
+ abstract = {We address one of the foundational problems in cryptography: the bias of coin-flipping protocols. Coin-flipping protocols allow mutually distrustful parties to generate a common unbiased random bit, guaranteeing that even if one of the parties is malicious, it cannot significantly bias the output of the honest party. A classical result by Cleve [STOC {\textquoteright}86] showed that for any two-party $r$-round coin-flipping protocol there exists an efficient adversary that can bias the output of the honest party by $\Omega(1/r)$. However, the best previously known protocol only guarantees $O(1/\sqrt{r})$ bias, and the question of whether Cleve{\textquoteright}s bound is tight has remained open for more than twenty years.
In this paper we establish the optimal trade-off between the round complexity and the bias of two-party coin-flipping protocols. Under standard assumptions (the existence of oblivious transfer), we show that Cleve{\textquoteright}s lower bound is tight: we construct an r-round protocol with bias O(1/r)},
isbn = {978-3-642-00456-8},
doi = {10.1007/978-3-642-00457-5_1},
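A toy contrast may help place the O(1/r) result: in the naive r-round XOR protocol below (our illustration, not the paper's construction), the party sending the last bit can force the outcome outright, a bias of 1/2 independent of r, which is exactly the gap between trivial protocols and Cleve's Omega(1/r) bound that the paper closes.

import random

def xor_protocol(r, cheat=False, rng=random):
    # r bits sent in rounds, output = XOR of all bits
    bits = [rng.randrange(2) for _ in range(r - 1)]
    bits.append(sum(bits) % 2 if cheat else rng.randrange(2))  # last mover can force 0
    return sum(bits) % 2

def bias(cheat, r=8, trials=50_000):
    zeros = sum(xor_protocol(r, cheat) == 0 for _ in range(trials))
    return abs(zeros / trials - 0.5)

print(f"all honest:      bias ~ {bias(False):.3f}")  # ~ 0
print(f"last mover lies: bias ~ {bias(True):.3f}")   # 0.500, for any r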
@@ -2988,11 +2975,11 @@ In this paper we establish the optimal trade-off between the round complexity an
pages = {33--50},
publisher = {USENIX},
organization = {USENIX},
- abstract = {In 2005, Murdoch and Danezis demonstrated the first practical congestion attack against a deployed anonymity network. They could identify which relays were on a target Tor user{\textquoteright}s path by building paths one at a time through every Tor relay and introducing congestion. However, the original attack was performed on only 13 Tor relays on the nascent and lightly loaded Tor network.
-
-We show that the attack from their paper is no longer practical on today{\textquoteright}s 1500-relay heavily loaded Tor network. The attack doesn{\textquoteright}t scale because a) the attacker needs a tremendous amount of bandwidth to measure enough relays during the attack window, and b) there are too many
-false positives now that many other users are adding congestion at the same time as the attacks.
-
+ abstract = {In 2005, Murdoch and Danezis demonstrated the first practical congestion attack against a deployed anonymity network. They could identify which relays were on a target Tor user{\textquoteright}s path by building paths one at a time through every Tor relay and introducing congestion. However, the original attack was performed on only 13 Tor relays on the nascent and lightly loaded Tor network.
+
+We show that the attack from their paper is no longer practical on today{\textquoteright}s 1500-relay heavily loaded Tor network. The attack doesn{\textquoteright}t scale because a) the attacker needs a tremendous amount of bandwidth to measure enough relays during the attack window, and b) there are too many
+false positives now that many other users are adding congestion at the same time as the attacks.
+
We then strengthen the original congestion attack by combining it with a novel bandwidth amplification attack based on a flaw in the Tor design that lets us build long circuits that loop back on themselves. We show that this new combination attack is practical and effective by demonstrating a working attack on today{\textquoteright}s deployed Tor network. By coming up with a model to better understand Tor{\textquoteright}s routing behavior under congestion, we further provide a statistical analysis characterizing how effective our attack is in each case},
keywords = {anonymity, attack, denial-of-service, installation, Tor},
www_section = {http://grothoff.org/christian/tor.pdf},
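The measurement at the heart of such congestion attacks can be sketched as a correlation test. The sketch below uses synthetic latencies (all distributions and constants are assumptions; no Tor is involved) to show how an induced on/off load pattern becomes visible on a circuit that traverses the probed relay, and how noise from other users drives the false positives discussed above.

import numpy as np

rng = np.random.default_rng(0)
pattern = rng.integers(0, 2, size=600)      # attacker's on/off load schedule

def victim_latency(relay_on_path, other_users_noise=0.050, delta=0.030):
    # base latency plus noise; `delta` is added whenever the probed relay
    # is loaded, but only if that relay actually carries the victim circuit
    noise = rng.normal(0.200, other_users_noise, size=pattern.size)
    return noise + (delta * pattern if relay_on_path else 0.0)

for on_path in (True, False):
    r = np.corrcoef(pattern, victim_latency(on_path))[0, 1]
    print(f"relay on path={on_path}:  correlation {r:+.2f}")
# raising other_users_noise (more concurrent users) pushes the on-path
# correlation toward the off-path one -- the false-positive effect above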
@@ -3100,8 +3087,8 @@ We then strengthen the original congestion attack by combining it with a novel b
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive tool for all kinds of applications. More advanced applications also need end-to-end routing, which goes beyond the simple data dissemination and collection mechanisms of early WSNs. The special properties of WSNs -- scarce memory, CPU, and energy resources -- make this a challenge. The Dynamic Address Routing protocol (DART) could be a good candidate for WSN routing, if it were not so prone to link outages.
-
+ abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive tool for all kinds of applications. More advanced applications also need end-to-end routing, which goes beyond the simple data dissemination and collection mechanisms of early WSNs. The special properties of WSNs -- scarce memory, CPU, and energy resources -- make this a challenge. The Dynamic Address Routing protocol (DART) could be a good candidate for WSN routing, if it were not so prone to link outages.
+
In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protocol for large WSNs. It combines ideas from landmark routing, flooding, and dynamic address routing. SLF is robust against link and node outages, requires only little routing state, and generates low maintenance traffic overhead},
keywords = {wireless sensor network},
isbn = {978-1-60558-751-6},
@@ -3117,8 +3104,8 @@ In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protoc
month = {November},
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
- abstract = {We introduce Torsk, a structured peer-to-peer low-latency anonymity protocol. Torsk is designed as an interoperable replacement for the relay selection and directory service of the popular Tor anonymity network, that decreases the bandwidth cost of relay selection and maintenance from quadratic to quasilinear while introducing no new attacks on the anonymity provided by Tor, and no additional delay to connections made via Tor. The resulting bandwidth savings make a modest-sized Torsk network significantly cheaper to operate, and allows low-bandwidth clients to join the network.
-
+ abstract = {We introduce Torsk, a structured peer-to-peer low-latency anonymity protocol. Torsk is designed as an interoperable replacement for the relay selection and directory service of the popular Tor anonymity network that decreases the bandwidth cost of relay selection and maintenance from quadratic to quasilinear while introducing no new attacks on the anonymity provided by Tor, and no additional delay to connections made via Tor. The resulting bandwidth savings make a modest-sized Torsk network significantly cheaper to operate, and allow low-bandwidth clients to join the network.
+
Unlike previous proposals for P2P anonymity schemes, Torsk does not require all users to relay traffic for others. Torsk utilizes a combination of two P2P lookup mechanisms with complementary strengths in order to avoid attacks on the confidentiality and integrity of lookups. We show by analysis that previously known attacks on P2P anonymity schemes do not apply to Torsk, and report on experiments conducted with a 336-node wide-area deployment of Torsk, demonstrating its efficiency and feasibility},
keywords = {P2P},
isbn = {978-1-60558-894-0},
@@ -3159,26 +3146,26 @@ Unlike previous proposals for P2P anonymity schemes, Torsk does not require all
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {Peer-to-peer approaches to anonymous communication promise to eliminate the scalability concerns and central vulnerability points of current networks such as Tor. However, the P2P setting introduces many new opportunities for attack, and previous designs do not provide an adequate level of anonymity. We propose ShadowWalker: a new low-latency P2P anonymous communication system, based on a random walk over a redundant structured topology. We base our design on shadows that redundantly check and certify neighbor information; these certifications enable nodes to perform random walks over the structured topology while avoiding route capture and other attacks.
-We analytically calculate the anonymity provided by ShadowWalker and show that it performs well for moderate levels of attackers, and is much better than the state of the art. We also design an extension that improves forwarding performance at a slight anonymity cost, while at the same time protecting against selective DoS attacks. We show that our system has manageable overhead and can handle moderate churn, making it an attractive new design for P2P anonymous communication},
+ abstract = {Peer-to-peer approaches to anonymous communication promise to eliminate the scalability concerns and central vulnerability points of current networks such as Tor. However, the P2P setting introduces many new opportunities for attack, and previous designs do not provide an adequate level of anonymity. We propose ShadowWalker: a new low-latency P2P anonymous communication system, based on a random walk over a redundant structured topology. We base our design on shadows that redundantly check and certify neighbor information; these certifications enable nodes to perform random walks over the structured topology while avoiding route capture and other attacks.
+We analytically calculate the anonymity provided by ShadowWalker and show that it performs well for moderate levels of attackers, and is much better than the state of the art. We also design an extension that improves forwarding performance at a slight anonymity cost, while at the same time protecting against selective DoS attacks. We show that our system has manageable overhead and can handle moderate churn, making it an attractive new design for P2P anonymous communication},
keywords = {anonymity, peer-to-peer, random walks},
isbn = {978-1-60558-894-0},
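A back-of-the-envelope model (ours, not the paper's analysis) of why shadow certification resists route capture: an uncertified malicious hop can always lie about its neighbors, whereas a certified hop can only lie undetected when all of its s shadows are malicious too.

f, walk_len = 0.2, 10          # fraction of malicious nodes, walk length; assumed

def capture_probability(per_hop_cheat, hops=walk_len):
    # the walk is captured if any hop can lie about its neighbors undetected
    return 1 - (1 - per_hop_cheat) ** hops

for s in (0, 1, 2):            # s = shadows certifying each neighbor table
    p = capture_probability(f ** (s + 1))   # node and all s shadows malicious
    print(f"s={s}: route-capture probability ~ {p:.3f}")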
@@ -3209,10 +3196,10 @@ mous communication},
school = {University of Wollongong, New South Wales, Australia},
type = {PhD},
address = {Wollongong, New South Wales, Australia},
- abstract = {This thesis investigates issues with existing approaches to distributed constraint satisfaction, and proposes a solution in the form of a new algorithm. These issues are most evident when solving large distributed constraint satisfaction problems, hence the title of the thesis.
-We will first survey existing algorithms for centralised constraint satisfaction, and describe how they have been modified to handle distributed constraint satisfaction. The method by which each algorithm achieves completeness will be investigated and analysed by application of a new theorem.
-We will then present a new algorithm, Support-Based Distributed Search, developed explicitly for distributed constraint satisfaction rather than being derived from centralised algorithms. This algorithm is inspired by the inherent structure of human arguments and similar mechanisms we observe in real-world negotiations.
-A number of modifications to this new algorithm are considered, and comparisons are made with existing algorithms, effectively demonstrating its place within the field. Empirical analysis is then conducted, and comparisons are made to state-of-the-art algorithms most able to handle large distributed constraint satisfaction problems.
+ abstract = {This thesis investigates issues with existing approaches to distributed constraint satisfaction, and proposes a solution in the form of a new algorithm. These issues are most evident when solving large distributed constraint satisfaction problems, hence the title of the thesis.
+We will first survey existing algorithms for centralised constraint satisfaction, and describe how they have been modified to handle distributed constraint satisfaction. The method by which each algorithm achieves completeness will be investigated and analysed by application of a new theorem.
+We will then present a new algorithm, Support-Based Distributed Search, developed explicitly for distributed constraint satisfaction rather than being derived from centralised algorithms. This algorithm is inspired by the inherent structure of human arguments and similar mechanisms we observe in real-world negotiations.
+A number of modifications to this new algorithm are considered, and comparisons are made with existing algorithms, effectively demonstrating its place within the field. Empirical analysis is then conducted, and comparisons are made to state-of-the-art algorithms most able to handle large distributed constraint satisfaction problems.
Finally, it is argued that any future development in distributed constraint satisfaction will necessitate changes in the algorithms used to solve small {\textquoteleft}embedded{\textquoteright} constraint satisfaction problems. The impact on embedded constraint satisfaction problems is considered, with a brief presentation of an improved algorithm for hypertree decomposition},
keywords = {algorithms, distributed constraint satisfaction},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Thesis\%20-\%20P.Harvey.pdf},
@@ -3332,9 +3319,9 @@ Finally, it is argued that any future development in distributed constraint sati
month = feb,
publisher = {IEEE},
organization = {IEEE},
- abstract = {Recent work has shown that properties of network traffic that remain observable after encryption, namely packet sizes and timing, can reveal surprising information about the traffic{\textquoteright}s contents (e.g., the language of a VoIP call [29], passwords in secure shell logins [20], or even web browsing habits [21, 14]). While there are some legitimate uses for encrypted traffic analysis, these techniques also raise important questions about the privacy of encrypted communications. A common tactic for
-mitigating such threats is to pad packets to uniform sizes or to send packets at fixed timing intervals; however, this approach is often inefficient. In this paper, we propose a novel method for thwarting statistical traffic analysis
-algorithms by optimally morphing one class of traffic to look like another class. Through the use of convex optimization
+ abstract = {Recent work has shown that properties of network traffic that remain observable after encryption, namely packet sizes and timing, can reveal surprising information about the traffic{\textquoteright}s contents (e.g., the language of a VoIP call [29], passwords in secure shell logins [20], or even web browsing habits [21, 14]). While there are some legitimate uses for encrypted traffic analysis, these techniques also raise important questions about the privacy of encrypted communications. A common tactic for
+mitigating such threats is to pad packets to uniform sizes or to send packets at fixed timing intervals; however, this approach is often inefficient. In this paper, we propose a novel method for thwarting statistical traffic analysis
+algorithms by optimally morphing one class of traffic to look like another class. Through the use of convex optimization
techniques, we show how to optimally modify packets in real-time to reduce the accuracy of a variety of traffic classifiers while incurring much less overhead than padding. Our evaluation of this technique against two published traffic classifiers for VoIP [29] and web traffic [14] shows that morphing works well on a wide range of network data{\textemdash}in some cases, simultaneously providing better privacy and lower overhead than na{\"\i}ve defenses},
keywords = {privacy, traffic analysis, VoIP},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/morphing09.pdf},
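The core of the morphing step can be phrased as a small transportation LP. The sketch below is our formulation of the idea under assumed size bins and distributions, using scipy.optimize.linprog: it finds a mapping from source to target packet sizes that matches the target distribution while minimizing the expected size change, the quantity the paper's convex program trades against classifier accuracy.

import numpy as np
from scipy.optimize import linprog

sizes = np.array([100, 500, 1000, 1500])   # packet-size bins (bytes), assumed
x = np.array([0.10, 0.40, 0.30, 0.20])     # source class size distribution
y = np.array([0.25, 0.25, 0.25, 0.25])     # target class size distribution

n = sizes.size
# cost of turning a source packet of size s_j into size s_i; |difference|
# is a stand-in (a real morpher pads upward and must split to go downward)
cost = np.abs(sizes[:, None] - sizes[None, :]).ravel()

A_eq, b_eq = [], []
for j in range(n):                  # all mass of source size j is reassigned
    row = np.zeros((n, n)); row[:, j] = 1
    A_eq.append(row.ravel()); b_eq.append(x[j])
for i in range(n):                  # morphed output must match the target
    row = np.zeros((n, n)); row[i, :] = 1
    A_eq.append(row.ravel()); b_eq.append(y[i])

res = linprog(cost, A_eq=np.array(A_eq), b_eq=b_eq, bounds=(0, None))
T = res.x.reshape(n, n)             # T[i, j]: mass of source size j sent as size i
print("morphing plan:\n", T.round(3))
print("expected bytes changed per packet:", round(res.fun, 1))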
@@ -3364,10 +3351,10 @@ techniques, we show how to optimally modify packets in real-time to reduce the a
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {Scalable source routing (SSR) is a network layer routing protocol that provides services that are similar to those of structured peer-to-peer overlays.
-
-In this paper, we describe several improvements to the SSR protocol. They aim at providing nodes with more up-to-date routing information: 1. The use of link-layer broadcast enables all neighbors of a node to contribute to the forwarding process. 2. A light-weight and fast selection mechanism avoids packet duplication and optimizes the source route iteratively. 3. Nodes implicitly learn the network{\textquoteright}s topology from overheard broadcast messages.
-
+ abstract = {Scalable source routing (SSR) is a network layer routing protocol that provides services that are similar to those of structured peer-to-peer overlays.
+
+In this paper, we describe several improvements to the SSR protocol. They aim at providing nodes with more up-to-date routing information: 1. The use of link-layer broadcast enables all neighbors of a node to contribute to the forwarding process. 2. A light-weight and fast selection mechanism avoids packet duplication and optimizes the source route iteratively. 3. Nodes implicitly learn the network{\textquoteright}s topology from overheard broadcast messages.
+
We present simulation results which show the performance gain of the proposed improvements: 1. The delivery ratio in settings with high mobility increases. 2. The required per-node state can be reduced as compared with the original SSR protocol. 3. The route stretch decreases. --- These improvements are achieved without increasing the routing overhead},
keywords = {mobile Ad-hoc networks, P2P, routing, scalable source routing},
isbn = {978-1-60558-569-7},
@@ -3384,8 +3371,8 @@ We present simulation results which show the performance gain of the proposed im
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {Privacy enhancing technologies like OpenSSL, OpenVPN or Tor establish an encrypted tunnel that enables users to hide content and addresses of requested websites from external observers This protection is endangered by local traffic analysis attacks that allow an external, passive attacker between the PET system and the user to uncover the identity of the requested sites. However, existing proposals for such attacks are not practicable yet.
-
+ abstract = {Privacy enhancing technologies like OpenSSL, OpenVPN or Tor establish an encrypted tunnel that enables users to hide content and addresses of requested websites from external observers. This protection is endangered by local traffic analysis attacks that allow an external, passive attacker between the PET system and the user to uncover the identity of the requested sites. However, existing proposals for such attacks are not yet practicable.
+
We present a novel method that applies common text mining techniques to the normalised frequency distribution of observable IP packet sizes. Our classifier correctly identifies up to 97\% of requests on a sample of 775 sites and over 300,000 real-world traffic dumps recorded over a two-month period. It outperforms previously known methods like Jaccard{\textquoteright}s classifier and Na{\"\i}ve Bayes that neglect packet frequencies altogether or rely on absolute frequency values, respectively. Our method is system-agnostic: it can be used against any PET without alteration. Closed-world results indicate that many popular single-hop and even multi-hop systems like Tor and JonDonym are vulnerable against this general fingerprinting attack. Furthermore, we discuss important real-world issues, namely false alarms and the influence of the browser cache on accuracy},
keywords = {forensics, latency, text mining, traffic analysis},
isbn = {978-1-60558-784-4},
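A minimal rendering of the idea, assuming synthetic traces: treat the packet sizes of a trace like the words of a document, build one normalized frequency vector per site, and classify a fresh trace by cosine similarity, a standard text-mining measure. The paper's actual classifier and real traffic dumps are not reproduced here.

import numpy as np

rng = np.random.default_rng(1)
BINS = 30                                    # packet-size buckets

def profile(traces):                         # normalized frequency vector
    h = np.zeros(BINS)
    for t in traces:
        h += np.bincount(t, minlength=BINS)
    return h / h.sum()

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

# synthetic "sites", each with its own characteristic size distribution
sites = {name: rng.dirichlet(np.full(BINS, 0.3)) for name in ("A", "B", "C")}
train = {name: profile([rng.choice(BINS, 400, p=p) for _ in range(5)])
         for name, p in sites.items()}

fresh = rng.choice(BINS, 400, p=sites["B"])  # unlabeled trace from site B
guess = max(train, key=lambda name: cosine(train[name], profile([fresh])))
print("classified as:", guess)               # expected: B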
@@ -3434,8 +3421,8 @@ We present a novel method that applies common text mining techniques to the norm
month = {November},
publisher = {ACM},
organization = {ACM},
- abstract = {We design and analyze the first practical anonymous payment mechanisms for network services. We start by reporting on our experience with the implementation of a routing micropayment solution for Tor. We then propose micropayment protocols of increasingly complex requirements for networked services, such as P2P or cloud-hosted services.
-
+ abstract = {We design and analyze the first practical anonymous payment mechanisms for network services. We start by reporting on our experience with the implementation of a routing micropayment solution for Tor. We then propose micropayment protocols of increasingly complex requirements for networked services, such as P2P or cloud-hosted services.
+
The solutions are efficient, with bandwidth and latency overheads of under 4\% and 0.9 ms respectively (in ORPay for Tor), provide full anonymity (both for payers and payees), and support thousands of transactions per second},
keywords = {anonymity, onion routing, payment, privacy},
isbn = {978-1-60558-783-7 },
@@ -3449,7 +3436,7 @@ The solutions are efficient, with bandwidth and latency overheads of under 4\% a
booktitle = {AmI-Blocks{\textquoteright}08, European Conference on Ambient Intelligence 2008 manuscript No},
year = {2008},
month = jan,
- abstract = {Ambient Intelligence pursues the vision that small networked computers will jointly perform tasks that create the illusion of an intelligent environment. One of the most pressing challenges in this context is the question how one could easily develop software for such highly complex, but resource-scarce systems. In this paper we present a snapshot of our ongoing work towards facilitating oftware development for Am- bient Intelligence systems. In particular, we present the AmbiComp [1] platform. It consists of small, modular hardware, a
+ abstract = {Ambient Intelligence pursues the vision that small networked computers will jointly perform tasks that create the illusion of an intelligent environment. One of the most pressing challenges in this context is the question of how one could easily develop software for such highly complex, but resource-scarce systems. In this paper we present a snapshot of our ongoing work towards facilitating software development for Ambient Intelligence systems. In particular, we present the AmbiComp [1] platform. It consists of small, modular hardware, a
flexible firmware including a Java Virtual Machine, and an Eclipse-based integrated development environment},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/publ_2008_eickhold-fuhrmann-saballus-ua_ambicomp.pdf},
author = {Johannes Eickhold and Thomas Fuhrmann and Bjoern Saballus and Sven Schlender and Thomas Suchy}
@@ -3472,8 +3459,8 @@ exible rmware including a Java Virtual Machine, and an Eclipse-based integrated
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
organization = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Estoril, Portugal},
- abstract = {Most former studies of Distributed Constraint Optimization Problems (DisCOPs) search considered only complete search algorithms, which are practical only for relatively small problems. Distributed local search algorithms can be used for solving DisCOPs. However, because of the differences between the global evaluation of a system{\textquoteright}s state and the private evaluation of states by agents, agents are unaware of the global best state which is explored by the algorithm. Previous attempts to use local search algorithms for solving DisCOPs reported the state held by the system at the termination of the algorithm, which was not necessarily the best state explored.
-
+ abstract = {Most former studies of Distributed Constraint Optimization Problem (DisCOP) search considered only complete search algorithms, which are practical only for relatively small problems. Distributed local search algorithms can be used for solving DisCOPs. However, because of the differences between the global evaluation of a system{\textquoteright}s state and the private evaluation of states by agents, agents are unaware of the global best state explored by the algorithm. Previous attempts to use local search algorithms for solving DisCOPs reported the state held by the system at the termination of the algorithm, which was not necessarily the best state explored.
+
A general framework for implementing distributed local search algorithms for DisCOPs is proposed. The proposed framework makes use of a BFS-tree in order to accumulate the costs of the system{\textquoteright}s state in its different steps and to propagate the detection of a new best step when it is found. The resulting framework enhances local search algorithms for DisCOPs with the anytime property. The proposed framework does not require additional network load. Agents are required to hold a small (linear) additional space (beside the requirements of the algorithm in use). The proposed framework preserves privacy at a higher level than complete DisCOP algorithms which make use of a pseudo-tree (ADOPT, DPOP)},
keywords = {algorithms, BFS-Tree, DCOP, DisCOPs, framework},
isbn = {978-0-9817381-2-3},
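A centralized toy of the anytime property the framework adds (in the paper the global cost is accumulated over a BFS-tree by the distributed agents; here one process plays all agents): the search returns the best state ever visited rather than the state held at termination.

import random

def cost(state):                      # toy stand-in: count adjacent conflicts
    return sum(a == b for a, b in zip(state, state[1:]))

def anytime_local_search(n=20, steps=200, seed=0):
    rng = random.Random(seed)
    state = [rng.randrange(3) for _ in range(n)]
    best, best_cost = list(state), cost(state)
    for _ in range(steps):
        i = rng.randrange(n)          # one "agent" re-picks its assignment
        state[i] = rng.randrange(3)
        c = cost(state)               # distributed: summed up the BFS-tree
        if c < best_cost:             # a new global best is detected...
            best, best_cost = list(state), c   # ...and recorded
    return best, best_cost            # anytime: best visited, not the last

print("best cost found:", anytime_local_search()[1])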
@@ -3515,10 +3502,10 @@ A general framework for implementing distributed local search algorithms for Dis
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {Much recent work on Byzantine state machine replication focuses on protocols with improved performance under benign conditions (LANs, homogeneous replicas, limited crash faults), with relatively little evaluation under typical, practical conditions (WAN delays, packet loss, transient disconnection, shared resources). This makes it difficult for system designers to choose the appropriate protocol for a real target deployment. Moreover, most protocol implementations differ in their choice of runtime environment, crypto library, and transport, hindering direct protocol comparisons even under similar conditions.
-
-We present a simulation environment for such protocols that combines a declarative networking system with a robust network simulator. Protocols can be rapidly implemented from pseudocode in the high-level declarative language of the former, while network conditions and (measured) costs of communication packages and crypto primitives can be plugged into the latter. We show that the resulting simulator faithfully predicts the performance of native protocol implementations, both as published and as measured in our local network.
-
+ abstract = {Much recent work on Byzantine state machine replication focuses on protocols with improved performance under benign conditions (LANs, homogeneous replicas, limited crash faults), with relatively little evaluation under typical, practical conditions (WAN delays, packet loss, transient disconnection, shared resources). This makes it difficult for system designers to choose the appropriate protocol for a real target deployment. Moreover, most protocol implementations differ in their choice of runtime environment, crypto library, and transport, hindering direct protocol comparisons even under similar conditions.
+
+We present a simulation environment for such protocols that combines a declarative networking system with a robust network simulator. Protocols can be rapidly implemented from pseudocode in the high-level declarative language of the former, while network conditions and (measured) costs of communication packages and crypto primitives can be plugged into the latter. We show that the resulting simulator faithfully predicts the performance of native protocol implementations, both as published and as measured in our local network.
+
+We use the simulator to compare representative protocols under identical conditions and rapidly explore the effects of changes in the costs of crypto operations, workloads, network conditions and faults. For example, we show that Zyzzyva outperforms protocols like PBFT and Q/U under most but not all conditions, indicating that one-size-fits-all protocols may be hard, if not impossible, to design in practice},
isbn = {111-999-5555-22-1},
www_section = {http://portal.acm.org/citation.cfm?id=1387603$\#$},
@@ -3565,7 +3552,7 @@ We use the simulator to compare representative protocols under identical conditi
publisher = {IEEE},
organization = {IEEE},
address = {Turku, Finland},
- abstract = {In this paper, we present the first heuristic for fully distributed bootstrapping of peer-to-peer networks. Our heuristic generates a stream of promising IP addresses to be probed as entry points. This stream is generated using statistical profiles using the IP ranges of start-of-authorities (SOAs) in the domain name system (DNS). We
+ abstract = {In this paper, we present the first heuristic for fully distributed bootstrapping of peer-to-peer networks. Our heuristic generates a stream of promising IP addresses to be probed as entry points. This stream is generated using statistical profiles of the IP ranges of start-of-authorities (SOAs) in the domain name system (DNS). We
present experimental results demonstrating that this approach makes it efficient and practical to bootstrap Gnutella-sized peer-to-peer networks --- without the need for centralized services or the public exposure of end-users{\textquoteright} private IP addresses},
keywords = {bootstrapping, DNS, installation, P2P},
www_section = {http://grothoff.org/christian/bootstrap.pdf},
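A hedged sketch of the candidate-stream idea: given a statistical profile of address ranges (the paper derives its profile from DNS SOA records; the CIDR blocks and weights below are made up), emit an endless stream of addresses to probe as entry points.

import ipaddress
import random

PROFILE = [                        # (CIDR block, weight) -- made-up numbers
    ("203.0.113.0/24", 5.0),
    ("198.51.100.0/24", 2.0),
    ("192.0.2.0/24", 1.0),
]

def candidate_stream(seed=0):
    rng = random.Random(seed)
    nets = [(ipaddress.ip_network(cidr), w) for cidr, w in PROFILE]
    ranges, weights = zip(*nets)
    while True:                    # endless stream of addresses to probe
        net = rng.choices(ranges, weights=weights)[0]
        yield str(net.network_address + rng.randrange(net.num_addresses))

stream = candidate_stream()
print([next(stream) for _ in range(3)])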
@@ -3631,8 +3618,8 @@ present experimental results demonstrating that with this approach it is efficie
month = {September},
publisher = {Springer-Verlag Berlin, Heidelberg},
organization = {Springer-Verlag Berlin, Heidelberg},
- abstract = {We present a novel attack targeting anonymizing systems. The attack involves placing a malicious relay node inside an anonymizing system and keeping legitimate nodes "busy." We achieve this by creating circular circuits and injecting fraudulent packets, crafted in a way that will make them spin an arbitrary number of times inside our artificial loops. At the same time we inject a small number of malicious nodes that we control into the anonymizing system. By keeping a significant part of the anonymizing system busy spinning useless packets, we increase the probability of having our nodes selected in the creation of legitimate circuits, since we have more free capacity to route requests than the legitimate nodes. This technique may lead to the compromise of the anonymity of people using the system.
-
+ abstract = {We present a novel attack targeting anonymizing systems. The attack involves placing a malicious relay node inside an anonymizing system and keeping legitimate nodes "busy." We achieve this by creating circular circuits and injecting fraudulent packets, crafted in a way that will make them spin an arbitrary number of times inside our artificial loops. At the same time we inject a small number of malicious nodes that we control into the anonymizing system. By keeping a significant part of the anonymizing system busy spinning useless packets, we increase the probability of having our nodes selected in the creation of legitimate circuits, since we have more free capacity to route requests than the legitimate nodes. This technique may lead to the compromise of the anonymity of people using the system.
+
To evaluate our novel attack, we used a real-world anonymizing system, TOR. We show that an anonymizing system that is composed of a series of relay nodes which perform cryptographic operations is vulnerable to our packet spinning attack. Our evaluation focuses on determining the cost we can introduce to the legitimate nodes by injecting the fraudulent packets, and the time required for a malicious client to create n-length TOR circuits. Furthermore we prove that routers that are involved in packet spinning do not have the capacity to process requests for the creation of new circuits and thus users are forced to select our malicious nodes for routing their data streams},
keywords = {anonymity, attack, Tor},
isbn = {978-3-540-85884-3},
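A rough capacity model, ours rather than the paper's, of the selection shift the attack exploits: if circuit building favors relays with free capacity, tying honest relays up with spinning packets raises the chance that an attacker relay is picked.

honest, malicious = 90, 10        # relay counts, assumed
capacity = 1.0                    # nominal per-relay capacity

def p_pick_malicious(honest_busy):
    # capacity-weighted selection; spinning consumes honest capacity only
    h = honest * capacity * (1 - honest_busy)
    m = malicious * capacity
    return m / (h + m)

for busy in (0.0, 0.5, 0.9):
    print(f"honest capacity consumed {busy:.0%}: "
          f"P(pick malicious) = {p_pick_malicious(busy):.2f}")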
@@ -3675,7 +3662,7 @@ To evaluate our novel attack, we used a real-world anonymizing system, TOR. We s
school = {Universit{\"a}t Fridericiana (TH) },
type = {Doctoral},
address = {Karlsruhe, Germany},
- abstract = {Working in distributed systems is part of the information society. More and more people and organizations work with growing data volumes.
+ abstract = {Working in distributed systems is part of the information society. More and more people and organizations work with growing data volumes.
Often, part of the problem is accessing large files in a shared way. Until now, there have been two commonly used approaches to allow this kind of access. Either the files are transferred via FTP, e-mail, or a similar medium before the access happens, or a centralized server provides file services. The first alternative has the disadvantage that the entire file has to be transferred before the first access can succeed. If only small parts of the file have changed compared to a previous version, the entire file has to be transferred anyway. The centralized approach has disadvantages regarding scalability and reliability. In both approaches, authorization and authentication can be difficult when users are separated by untrusted network segments},
author = {unknown},
www_section = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000009668},
@@ -3777,8 +3764,8 @@ Often, part of the problem is to access large files in a share way. Until now, t
pages = {63--76},
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
- abstract = {Intermittently connected mobile networks are wireless networks where most of the time there does not exist a complete path from the source to the destination. There are many real networks that follow this model, for example, wildlife tracking sensor networks, military networks, vehicular ad hoc networks (VANETs), etc. In this context, conventional routing schemes would fail, because they try to establish complete end-to-end paths, before any data is sent.
-
+ abstract = {Intermittently connected mobile networks are wireless networks where most of the time there does not exist a complete path from the source to the destination. There are many real networks that follow this model, for example, wildlife tracking sensor networks, military networks, vehicular ad hoc networks (VANETs), etc. In this context, conventional routing schemes would fail, because they try to establish complete end-to-end paths, before any data is sent.
+
To deal with such networks researchers have suggested to use flooding-based routing schemes. While flooding-based schemes have a high probability of delivery, they waste a lot of energy and suffer from severe contention which can significantly degrade their performance. With this in mind, we look into a number of "single-copy" routing schemes that use only one copy per message, and hence significantly reduce the resource requirements of flooding-based algorithms. We perform a detailed exploration of the single-copy routing space in order to identify efficient single-copy solutions that (i) can be employed when low resource usage is critical, and (ii) can help improve the design of general routing schemes that use multiple copies. We also propose a theoretical framework that we use to analyze the performance of all single-copy schemes presented, and to derive upper and lower bounds on the delay of any scheme},
keywords = {mobile Ad-hoc networks, routing},
issn = {1063-6692},
@@ -4020,10 +4007,10 @@ To deal with such networks researchers have suggested to use flooding-based rout
month = {September},
school = {University of Waterloo},
type = {masters},
- abstract = {The Tor network gives anonymity to Internet users by relaying their traffic through the world over a variety of routers. This incurs latency, and this thesis first explores where this latency occurs. Experiments discount the latency induced by routing
-traffic and computational latency to determine there is a substantial component that is caused by delay in the communication path. We determine that congestion control is causing the delay.
-Tor multiplexes multiple streams of data over a single TCP connection. This is not a wise use of TCP, and as such results in the unfair application of congestion control. We illustrate an example of this occurrence on a Tor node on the live network and also illustrate how packet dropping and reordering cause interference between the multiplexed streams.
-Our solution is to use a TCP-over-DTLS (Datagram Transport Layer Security) transport between routers, and give each stream of data its own TCP connection. We give our design for our proposal, and details about its implementation. Finally, we perform experiments on our implemented version to illustrate that our proposal has in fact resolved the multiplexing issues discovered in our system performance analysis. The future work gives a number of steps towards optimizing and improving our work, along with some tangential ideas that were discovered during research.
+ abstract = {The Tor network gives anonymity to Internet users by relaying their traffic around the world through a variety of routers. This incurs latency, and this thesis first explores where this latency occurs. Experiments discount the latency induced by routing
+traffic and computational latency, determining that there is a substantial component caused by delay in the communication path. We determine that congestion control is causing the delay.
+Tor multiplexes multiple streams of data over a single TCP connection. This is not a wise use of TCP, and as such results in the unfair application of congestion control. We demonstrate this occurrence on a Tor node on the live network and also illustrate how packet dropping and reordering cause interference between the multiplexed streams.
+Our solution is to use a TCP-over-DTLS (Datagram Transport Layer Security) transport between routers and to give each stream of data its own TCP connection. We present the design of our proposal and the details of its implementation. Finally, we perform experiments on our implemented version to illustrate that our proposal has in fact resolved the multiplexing issues discovered in our system performance analysis. The future work outlines a number of steps towards optimizing and improving our work, along with some tangential ideas that were discovered during research.
Additionally, the open-source software projects latency proxy and libspe, which were designed for our purposes but programmed for universal applicability, are discussed},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/reardon-thesis.pdf},
author = {Reardon, Joel}
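The cross-stream interference the thesis measures can be seen in a toy in-order delivery model (all timings below are illustrative, not measurements): with one shared TCP-like connection, a loss on stream A also stalls stream B behind the retransmission hole, while per-stream connections confine the stall to the losing stream.

RTT, RTO = 0.1, 1.0               # illustrative timings (seconds)
packets = [("A" if i % 2 == 0 else "B", i * 0.01) for i in range(10)]
lost = {2}                        # packet 2 (stream A) is lost once

def delivery_times(multiplexed):
    done, frontier = {}, {}       # in-order delivery frontier per connection
    for seq, (stream, sent) in enumerate(packets):
        conn = "shared" if multiplexed else stream
        arrive = sent + (RTO + RTT if seq in lost else RTT)
        frontier[conn] = max(frontier.get(conn, 0.0), arrive)
        done[seq] = frontier[conn]    # held back until the hole is filled
    return done

for mux in (True, False):
    d = delivery_times(mux)
    worst_b = max(d[s] - packets[s][1] for s in d if packets[s][0] == "B")
    print(f"multiplexed={mux}: worst stream-B delay {worst_b:.2f}s")
# one shared connection: ~1.09s; per-stream connections: ~0.10s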
@@ -4051,8 +4038,8 @@ Additionally, the open-source software projects latency proxy and libspe, which
publisher = {ACM Press},
organization = {ACM Press},
address = {Alexandria, Virginia, USA},
- abstract = {We analyze information leaks in the lookup mechanisms of structured peer-to-peer anonymous communication systems and how these leaks can be used to compromise anonymity. We show that the techniques that are used to combat active attacks on the lookup mechanism dramatically increase information leaks and increase the efficacy of passive attacks. Thus there is a trade-off between robustness to active and passive attacks.
-
+ abstract = {We analyze information leaks in the lookup mechanisms of structured peer-to-peer anonymous communication systems and how these leaks can be used to compromise anonymity. We show that the techniques that are used to combat active attacks on the lookup mechanism dramatically increase information leaks and increase the efficacy of passive attacks. Thus there is a trade-off between robustness to active and passive attacks.
+
We study this trade-off in two P2P anonymous systems, Salsa and AP3. In both cases, we find that, by combining both passive and active attacks, anonymity can be compromised much more effectively than previously thought, rendering these systems insecure for most proposed uses. Our results hold even if security parameters are changed or other improvements to the systems are considered. Our study therefore motivates the search for new approaches to P2P anonymous communication},
keywords = {anonymity, attack, information leaks, P2P},
isbn = {978-1-59593-810-7},
@@ -4110,8 +4097,8 @@ We study this trade-off in two P2P anonymous systems, Salsa and AP3. In both cas
year = {2008},
pages = {23--48},
publisher = {JMLR.org},
- abstract = {Efficient and expressive comparison of sequences is an essential procedure for learning with sequential data. In this article we propose a generic framework for computation of similarity measures for sequences, covering various kernel, distance and non-metric similarity functions. The basis for comparison is embedding of sequences using a formal language, such as a set of natural words, k-grams or all contiguous subsequences. As realizations of the framework we provide linear-time algorithms of different complexity and capabilities using sorted arrays, tries and suffix trees as underlying data structures.
-
+ abstract = {Efficient and expressive comparison of sequences is an essential procedure for learning with sequential data. In this article we propose a generic framework for computation of similarity measures for sequences, covering various kernel, distance and non-metric similarity functions. The basis for comparison is embedding of sequences using a formal language, such as a set of natural words, k-grams or all contiguous subsequences. As realizations of the framework we provide linear-time algorithms of different complexity and capabilities using sorted arrays, tries and suffix trees as underlying data structures.
+
Experiments on data sets from bioinformatics, text processing and computer security illustrate the efficiency of the proposed algorithms---enabling peak performances of up to $10^6$ pairwise comparisons per second. The utility of distances and non-metric similarity measures for sequences as alternatives to string kernels is demonstrated in applications of text categorization, network intrusion detection and transcription site recognition in DNA},
issn = {1532-4435},
www_section = {http://portal.acm.org/citation.cfm?id=1390683$\#$},
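The embedding the abstract describes is easy to sketch for the simplest case: map each sequence to its k-gram counts (here a hash map; the paper also covers sorted arrays, tries, and suffix trees) and derive both a kernel and its induced distance from that one representation.

from collections import Counter
import math

def kgrams(seq, k=3):
    return Counter(seq[i:i + k] for i in range(len(seq) - k + 1))

def spectrum_kernel(a, b, k=3):     # inner product of k-gram counts
    x, y = kgrams(a, k), kgrams(b, k)
    if len(y) < len(x):
        x, y = y, x                 # iterate over the smaller map
    return sum(c * y[g] for g, c in x.items())

def spectrum_distance(a, b, k=3):   # Euclidean distance in the same space
    return math.sqrt(spectrum_kernel(a, a, k)
                     - 2 * spectrum_kernel(a, b, k)
                     + spectrum_kernel(b, b, k))

print(spectrum_kernel("GATTACA", "GATTTACA"))              # shared 3-gram weight: 5
print(round(spectrum_distance("GATTACA", "GATTTACA"), 3))  # 1.0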
@@ -4227,7 +4214,7 @@ Experiments on data sets from bioinformatics, text processing and computer secur
publisher = {Springer},
organization = {Springer},
address = {Leuven, Belgium},
- abstract = {Despite the growth of the Internet and the increasing concern for privacy of online communications, current deployments of anonymization networks depend on a very small set of nodes that volunteer their bandwidth. We believe that the main reason is not disbelief in their ability to protect anonymity, but rather the practical limitations in bandwidth and latency that stem from limited participation. This limited participation, in turn, is due to a lack of incentives to participate. We propose providing economic incentives, which historically have worked very well.
+ abstract = {Despite the growth of the Internet and the increasing concern for privacy of online communications, current deployments of anonymization networks depend on a very small set of nodes that volunteer their bandwidth. We believe that the main reason is not disbelief in their ability to protect anonymity, but rather the practical limitations in bandwidth and latency that stem from limited participation. This limited participation, in turn, is due to a lack of incentives to participate. We propose providing economic incentives, which historically have worked very well.
In this paper, we demonstrate a payment scheme that can be used to compensate nodes which provide anonymity in Tor, an existing onion-routing anonymizing network. We show that current anonymous payment schemes are not suitable and introduce a hybrid payment system based on a combination of the Peppercoin Micropayment system and a new type of {\textquotedblleft}one use{\textquotedblright} electronic cash. Our system claims to maintain users{\textquoteright} anonymity, although payment techniques mentioned previously -- when adopted individually -- provably fail},
keywords = {anonymity, onion routing, Tor},
doi = {10.1007/978-3-540-70630-4},
@@ -4428,8 +4415,8 @@ In this paper, we demonstrate a payment scheme that can be used to compensate no
publisher = {Springer},
organization = {Springer},
address = {Leuven, Belgium},
- abstract = {To date, there has yet to be a study that characterizes the usage of a real deployed anonymity service. We present observations and analysis obtained by participating in the Tor network. Our primary goals are to better understand Tor as it is deployed and through this understanding, propose improvements. In particular, we are interested in answering the following questions: (1) How is Tor being used? (2) How is Tor being mis-used? (3) Who is using Tor?
-
+ abstract = {To date, there has yet to be a study that characterizes the usage of a real deployed anonymity service. We present observations and analysis obtained by participating in the Tor network. Our primary goals are to better understand Tor as it is deployed and through this understanding, propose improvements. In particular, we are interested in answering the following questions: (1) How is Tor being used? (2) How is Tor being mis-used? (3) Who is using Tor?
+
To sample the results, we show that web traffic makes up the majority of the connections and bandwidth, but non-interactive protocols consume a disproportionately large amount of bandwidth when compared to interactive protocols. We provide a survey of how Tor is being misused, both by clients and by Tor router operators. In particular, we develop a method for detecting exit router logging (in certain cases). Finally, we present evidence that Tor is used throughout the world, but router participation is limited to only a few countries},
keywords = {anonymity, Tor},
isbn = {978-3-540-70629-8},
@@ -4740,8 +4727,8 @@ To sample the results, we show that web traffic makes up the majority of the con
month = {October},
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
- abstract = {Several credential systems have been proposed in which users can authenticate to services anonymously. Since anonymity can give users the license to misbehave, some variants allow the selective deanonymization (or linking) of misbehaving users upon a complaint to a trusted third party (TTP). The ability of the TTP to revoke a user{\textquoteright}s privacy at any time, however, is too strong a punishment for misbehavior. To limit the scope of deanonymization, systems such as "e-cash" have been proposed in which users are deanonymized under only certain types of well-defined misbehavior such as "double spending." While useful in some applications, it is not possible to generalize such techniques to more subjective definitions of misbehavior.
-
+ abstract = {Several credential systems have been proposed in which users can authenticate to services anonymously. Since anonymity can give users the license to misbehave, some variants allow the selective deanonymization (or linking) of misbehaving users upon a complaint to a trusted third party (TTP). The ability of the TTP to revoke a user{\textquoteright}s privacy at any time, however, is too strong a punishment for misbehavior. To limit the scope of deanonymization, systems such as "e-cash" have been proposed in which users are deanonymized under only certain types of well-defined misbehavior such as "double spending." While useful in some applications, it is not possible to generalize such techniques to more subjective definitions of misbehavior.
+
We present the first anonymous credential system in which services can "blacklist" misbehaving users without contacting a TTP. Since blacklisted users remain anonymous, misbehaviors can be judged subjectively without users fearing arbitrary deanonymization by a TTP},
keywords = {privacy, revocation, user misbehavior},
isbn = {978-1-59593-703-2},
@@ -4759,10 +4746,10 @@ We present the first anonymous credential system in which services can "blacklis
pages = {49--60},
publisher = {ACM},
address = {New York, NY, USA},
- abstract = {Peer-to-peer systems promise inexpensive scalability, adaptability, and robustness. Thus, they are an attractive platform for file sharing, distributed wikis, and search engines. These applications often store weakly structured data, requiring sophisticated search algorithms. To simplify the search problem, most scalable algorithms introduce structure to the network. However, churn or violent disruption may break this structure, compromising search guarantees.
-
-This paper proposes a simple probabilistic search system, BubbleStorm, built on random multigraphs. Our primary contribution is a flexible and reliable strategy for performing exhaustive search. BubbleStorm also exploits the heterogeneous bandwidth of peers. However, we sacrifice some of this bandwidth for high parallelism and low latency. The provided search guarantees are tunable, with success probability adjustable well into the realm of reliable systems.
-
+ abstract = {Peer-to-peer systems promise inexpensive scalability, adaptability, and robustness. Thus, they are an attractive platform for file sharing, distributed wikis, and search engines. These applications often store weakly structured data, requiring sophisticated search algorithms. To simplify the search problem, most scalable algorithms introduce structure to the network. However, churn or violent disruption may break this structure, compromising search guarantees.
+
+This paper proposes a simple probabilistic search system, BubbleStorm, built on random multigraphs. Our primary contribution is a flexible and reliable strategy for performing exhaustive search. BubbleStorm also exploits the heterogeneous bandwidth of peers. However, we sacrifice some of this bandwidth for high parallelism and low latency. The provided search guarantees are tunable, with success probability adjustable well into the realm of reliable systems.
+
For validation, we simulate a network with one million low-end peers and show BubbleStorm handles up to 90\% simultaneous peer departure and 50\% simultaneous crash},
keywords = {exhaustive search, peer-to-peer networking, resilience, simulation},
issn = {0146-4833},
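The tunable guarantee can be glossed with a birthday-paradox argument (our gloss; the parameters below are assumptions, not BubbleStorm's protocol): replicate a datum on d random nodes and send each query to q random nodes, and the query misses with probability roughly exp(-qd/n), so q = d = c*sqrt(n) leaves success probability about 1 - exp(-c^2), adjustable through c.

import math
import random

def miss_rate(n, d, q, trials=2000, seed=0):
    rng = random.Random(seed)
    nodes = range(n)
    misses = 0
    for _ in range(trials):
        data_bubble = set(rng.sample(nodes, d))      # replicas of the datum
        query_bubble = rng.sample(nodes, q)          # nodes the query visits
        misses += not data_bubble.intersection(query_bubble)
    return misses / trials

n, c = 10_000, 2.0
d = q = int(c * math.sqrt(n))      # bubble size ~ c * sqrt(n)
print("measured miss rate :", miss_rate(n, d, q))
print("predicted exp(-c^2):", round(math.exp(-c * c), 4))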
@@ -4805,12 +4792,12 @@ For validation, we simulate a network with one million low-end peers and show Bu
school = {University of Oregon},
type = {phd},
address = {Eugene, OR, USA},
- abstract = {Real-time, interactive, multi-user (RIM) applications are networked applications that allow users to collaborate and interact with each other over the Internet for work, education and training, or entertainment purposes. Multiplayer games, distance learning applications, collaborative whiteboards, immersive educational and training simulations, and distributed interactive simulations are examples of these applications. Of these RIM applications, multiplayer games are an important class for research due to their widespread deployment and popularity on the Internet. Research with multiplayer games will have a direct impact on all RIM applications.
-
-While large-scale multiplayer games have typically used a client/server architecture for network communication, we propose using a peer-to-peer architecture to solve the scalability problems inherent in centralized systems. Past research and actual deployments of peer-to-peer networks show that they can scale to millions of users. However, these prior peer-to-peer networks do not meet the low latency and interactive requirements that multi-player games need. Indeed, the fundamental problem of maintaining consistency between all nodes in the face of failures, delays, and malicious attacks has to be solved to make a peer-to-peer networks a viable solution.
-
-We propose solving the consistency problem through secure and scalable event ordering. While traditional event ordering requires all-to-all message passing and at least two rounds of communication, we argue that multiplayer games lend themselves naturally to a hierarchical decomposition of their state space so that we can reduce the communication cost of event ordering. We also argue that by using cryptography, a discrete view of time, and majority voting, we can totally order events in a real-time setting. By applying these two concepts, we can scale multiplayer games to millions of players.
-
+ abstract = {Real-time, interactive, multi-user (RIM) applications are networked applications that allow users to collaborate and interact with each other over the Internet for work, education and training, or entertainment purposes. Multiplayer games, distance learning applications, collaborative whiteboards, immersive educational and training simulations, and distributed interactive simulations are examples of these applications. Of these RIM applications, multiplayer games are an important class for research due to their widespread deployment and popularity on the Internet. Research with multiplayer games will have a direct impact on all RIM applications.
+
+While large-scale multiplayer games have typically used a client/server architecture for network communication, we propose using a peer-to-peer architecture to solve the scalability problems inherent in centralized systems. Past research and actual deployments of peer-to-peer networks show that they can scale to millions of users. However, these prior peer-to-peer networks do not meet the low-latency and interactivity requirements that multiplayer games need. Indeed, the fundamental problem of maintaining consistency between all nodes in the face of failures, delays, and malicious attacks has to be solved to make peer-to-peer networks a viable solution.
+
+We propose solving the consistency problem through secure and scalable event ordering. While traditional event ordering requires all-to-all message passing and at least two rounds of communication, we argue that multiplayer games lend themselves naturally to a hierarchical decomposition of their state space so that we can reduce the communication cost of event ordering. We also argue that by using cryptography, a discrete view of time, and majority voting, we can totally order events in a real-time setting. By applying these two concepts, we can scale multiplayer games to millions of players.
+
We develop our solution in two parts: a cheat-proof and real-time event ordering protocol and a scalable, hierarchical structure that organizes peers in a tree according to their scope of interest in the game. Our work represents the first, complete solution to this problem and we show through both proofs and simulations that our protocols allow the creation of large-scale, peer-to-peer games that are resistant to cheating while maintaining real-time responsiveness in the system},
www_section = {http://portal.acm.org/citation.cfm?id=1329865$\#$},
author = {Chis GauthierDickey}
@@ -4867,8 +4854,8 @@ We develop our solution in two parts: a cheat-proof and real-time event ordering
pages = {41--52},
publisher = {ACM},
address = {New York, NY, USA},
- abstract = {The Internet{\textquoteright}s routing system is facing stresses due to its poor fundamental scaling properties. Compact routing is a research field that studies fundamental limits of routing scalability and designs algorithms that try to meet these limits. In particular, compact routing research shows that shortest-path routing, forming a core of traditional routing algorithms, cannot guarantee routing table (RT) sizes that on all network topologies grow slower than linearly as functions of the network size. However, there are plenty of compact routing schemes that relax the shortest-path requirement and allow for improved, sublinear RT size scaling that is mathematically provable for all static network topologies. In particular, there exist compact routing schemes designed for grids, trees, and Internet-like topologies that offer RT sizes that scale logarithmically with the network size.
-
+ abstract = {The Internet{\textquoteright}s routing system is facing stresses due to its poor fundamental scaling properties. Compact routing is a research field that studies fundamental limits of routing scalability and designs algorithms that try to meet these limits. In particular, compact routing research shows that shortest-path routing, forming a core of traditional routing algorithms, cannot guarantee routing table (RT) sizes that on all network topologies grow slower than linearly as functions of the network size. However, there are plenty of compact routing schemes that relax the shortest-path requirement and allow for improved, sublinear RT size scaling that is mathematically provable for all static network topologies. In particular, there exist compact routing schemes designed for grids, trees, and Internet-like topologies that offer RT sizes that scale logarithmically with the network size.
+
+In this paper, we demonstrate that in view of recent results in compact routing research, such logarithmic scaling on Internet-like topologies is fundamentally impossible in the presence of topology dynamics or topology-independent (flat) addressing. We use analytic arguments to show that the number of routing control messages per topology change cannot scale better than linearly on Internet-like topologies. We also employ simulations to confirm that logarithmic RT size scaling gets broken by topology-independent addressing, a cornerstone of popular locator-identifier split proposals aiming at improving routing scaling in the presence of network topology dynamics or host mobility. These pessimistic findings lead us to the conclusion that a fundamental re-examination of assumptions behind routing models and abstractions is needed in order to find a routing architecture that would be able to scale "indefinitely"},
keywords = {compact routing, internet routing, routing scalability},
issn = {0146-4833},
@@ -4924,11 +4911,11 @@ In this paper, we demonstrate that in view of recent results in compact routing
month = {December},
school = {University of Cambridge},
type = {phd},
- abstract = {The spread of wide-scale Internet surveillance has spurred interest in anonymity systems that protect users{\textquoteright} privacy by restricting unauthorised access to their identity. This requirement can be considered as a flow control policy in the well established field of multilevel secure systems. I apply previous research on covert channels (unintended means to communicate in violation of a security policy) to analyse several anonymity systems in an innovative way.
-One application for anonymity systems is to prevent collusion in competitions. I show how covert channels may be exploited to violate these protections and construct defences against such attacks, drawing from previous covert channel research and collusion-resistant voting systems.
-In the military context, for which multilevel secure systems were designed, covert channels are increasingly eliminated by physical separation of interconnected single-role computers. Prior work on the remaining network covert channels has been solely based on protocol specifications. I examine some protocol implementations and show how the use of several covert channels can be detected and how channels can be modified to resist detection.
-I show how side channels (unintended information leakage) in anonymity networks may reveal the behaviour of users. While drawing on previous research on traffic analysis and covert channels, I avoid the traditional assumption of an omnipotent adversary. Rather, these attacks are feasible for an attacker with limited access to the network. The effectiveness of these techniques is demonstrated by experiments on a deployed anonymity network, Tor.
-Finally, I introduce novel covert and side channels which exploit thermal effects. Changes in temperature can be remotely induced through CPU load and measured by their effects on crystal clock skew. Experiments show this to be an effective attack against Tor. This side channel may also be usable for geolocation and, as a covert channel, can cross supposedly infallible air-gap security boundaries.
+ abstract = {The spread of wide-scale Internet surveillance has spurred interest in anonymity systems that protect users{\textquoteright} privacy by restricting unauthorised access to their identity. This requirement can be considered as a flow control policy in the well established field of multilevel secure systems. I apply previous research on covert channels (unintended means to communicate in violation of a security policy) to analyse several anonymity systems in an innovative way.
+One application for anonymity systems is to prevent collusion in competitions. I show how covert channels may be exploited to violate these protections and construct defences against such attacks, drawing from previous covert channel research and collusion-resistant voting systems.
+In the military context, for which multilevel secure systems were designed, covert channels are increasingly eliminated by physical separation of interconnected single-role computers. Prior work on the remaining network covert channels has been solely based on protocol specifications. I examine some protocol implementations and show how the use of several covert channels can be detected and how channels can be modified to resist detection.
+I show how side channels (unintended information leakage) in anonymity networks may reveal the behaviour of users. While drawing on previous research on traffic analysis and covert channels, I avoid the traditional assumption of an omnipotent adversary. Rather, these attacks are feasible for an attacker with limited access to the network. The effectiveness of these techniques is demonstrated by experiments on a deployed anonymity network, Tor.
+Finally, I introduce novel covert and side channels which exploit thermal effects. Changes in temperature can be remotely induced through CPU load and measured by their effects on crystal clock skew. Experiments show this to be an effective attack against Tor. This side channel may also be usable for geolocation and, as a covert channel, can cross supposedly infallible air-gap security boundaries.
This thesis demonstrates how theoretical models and generic methodologies relating to covert channels may be applied to find practical solutions to problems in real-world anonymity systems. These findings confirm the existing hypothesis that covert channel analysis, vulnerabilities and defences developed for multilevel secure systems apply equally well to anonymity systems},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.62.5142},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/steven-thesis.pdf},
@@ -4972,8 +4959,8 @@ This thesis demonstrates how theoretical models and generic methodologies relati
publisher = {Australian Computer Society, Inc},
organization = {Australian Computer Society, Inc},
address = {Darlinghurst, Australia, Australia},
- abstract = {Low latency anonymous network systems, such as Tor, were considered secure against timing attacks when the threat model does not include a global adversary. In this threat model the adversary can only see part of the links in the system. In a recent paper entitled Low-cost traffic analysis of Tor, it was shown that a variant of timing attack that does not require a global adversary can be applied to Tor. More importantly, the authors claimed that their attack would work on any low latency anonymous network system. The implication of the attack is that all low latency anonymous networks will be vulnerable to this attack even if there is no global adversary.
-
+ abstract = {Low latency anonymous network systems, such as Tor, were considered secure against timing attacks when the threat model does not include a global adversary. In this threat model the adversary can only see part of the links in the system. In a recent paper entitled Low-cost traffic analysis of Tor, it was shown that a variant of timing attack that does not require a global adversary can be applied to Tor. More importantly, the authors claimed that their attack would work on any low latency anonymous network system. The implication of the attack is that all low latency anonymous networks will be vulnerable to this attack even if there is no global adversary.
+
In this paper, we investigate this claim against other low latency anonymous networks, including Tarzan and MorphMix. Our results show that, in contrast to the claim of the aforementioned paper, the attack may not be applicable in all cases. Based on our analysis, we draw design principles for secure low latency anonymous network systems (which are also secure against the above attack)},
keywords = {anonymity, latency, Morphmix, Tarzan, timing attack, Tor},
isbn = {1-920-68285-X},
@@ -5160,8 +5147,8 @@ In this paper, we investigate this claim against other low latency anonymous net
publisher = {ACM},
organization = {ACM},
address = {San Diego, CA, USA},
- abstract = {Distributed hash tables (DHTs) have been actively studied in the literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about the total number of peers online and their geographical distribution.
-
+ abstract = {Distributed hash tables (DHTs) have been actively studied in the literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about the total number of peers online and their geographical distribution.
+
Peers are identified by the so-called KAD ID, which was up to now assumed to remain the same across sessions. However, we observed that this is not the case: there is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover},
keywords = {distributed hash table, lookup, peer-to-peer networking},
isbn = {978-1-59593-908-1},
@@ -5234,8 +5221,8 @@ Peers are identified by the so called KAD ID, which was up to now assumed to rem
month = feb,
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {We show how to obfuscate a secret shuffle of ciphertexts: shuffling becomes a public operation. Given a trusted party that samples and obfuscates a shuffle before any ciphertexts are received, this reduces the problem of constructing a mix-net to verifiable joint decryption.
-We construct public-key obfuscations of a decryption shuffle based on the Boneh-Goh-Nissim (BGN) cryptosystem and a re-encryption shuffle based on the Paillier cryptosystem. Both allow efficient distributed verifiable decryption.
+ abstract = {We show how to obfuscate a secret shuffle of ciphertexts: shuffling becomes a public operation. Given a trusted party that samples and obfuscates a shuffle before any ciphertexts are received, this reduces the problem of constructing a mix-net to verifiable joint decryption.
+We construct public-key obfuscations of a decryption shuffle based on the Boneh-Goh-Nissim (BGN) cryptosystem and a re-encryption shuffle based on the Paillier cryptosystem. Both allow efficient distributed verifiable decryption.
Finally, we give a distributed protocol for sampling and obfuscating each of the above shuffles and show how it can be used in a trivial way to construct a universally composable mix-net. Our constructions are practical when the number of senders N is small, yet large enough to handle a number of practical cases, e.g. N = 350 in the BGN case and N = 2000 in the Paillier case},
keywords = {public key cryptography, re-encryption},
isbn = {978-3-540-70935-0},
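
For intuition, the re-encryption shuffle mentioned above rests on the fact that anyone holding a Paillier public key can re-randomize a ciphertext by multiplying in a fresh encryption of zero. A toy sketch with insecure demo parameters; this shows the underlying shuffle operation only, not the paper's public-key obfuscation of it:

import math, random

p, q = 251, 241                       # toy primes; real keys are 1024+ bits
n, n2 = p * q, (p * q) ** 2
g = n + 1
lam = (p - 1) * (q - 1) // math.gcd(p - 1, q - 1)   # lcm(p-1, q-1)
mu = pow((pow(g, lam, n2) - 1) // n, -1, n)

def enc(m):
    r = random.randrange(1, n)
    while math.gcd(r, n) != 1:
        r = random.randrange(1, n)
    return pow(g, m, n2) * pow(r, n, n2) % n2

def dec(c):
    return (pow(c, lam, n2) - 1) // n * mu % n

def reencrypt(c):
    # multiplying by a fresh encryption of 0 re-randomizes the ciphertext
    return c * enc(0) % n2

def shuffle(cts):
    out = [reencrypt(c) for c in cts]
    random.shuffle(out)
    return out

cts = shuffle([enc(m) for m in (7, 13, 42)])
print(sorted(dec(c) for c in cts))    # [7, 13, 42], from unlinkable ciphertexts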
@@ -5380,9 +5367,9 @@ Finally, we give a distributed protocol for sampling and obfuscating each of the
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
address = {Hiroshima, Japan},
- abstract = {Peer-to-peer (P2P) is a system of overlay networks such that participants can potentially take symmetrical roles.
-This translates into a design based on the philosophy of Local Production, Local Consumption (LPLC), originally an agricultural concept to promote a sustainable local economy. This philosophy helps enhance the survivability of a society by providing a dependable economic infrastructure and promoting the power of individuals.
-
+ abstract = {Peer-to-peer (P2P) is a system of overlay networks such that participants can potentially take symmetrical roles.
+This translates into a design based on the philosophy of Local Production, Local Consumption (LPLC), originally an agricultural concept to promote a sustainable local economy. This philosophy helps enhance the survivability of a society by providing a dependable economic infrastructure and promoting the power of individuals.
+
This paper attempts to put existing work on P2P designs into the perspective of a five-layer architecture model to realize LPLC, and proposes future research directions toward the integration of P2P studies for the actualization of a dependable and sustainable social infrastructure},
keywords = {LPLC, P2P, peer-to-peer networking},
doi = {http://doi.ieeecomputersociety.org/10.1109/SAINT-W.2007.59},
@@ -5414,19 +5401,19 @@ We investigate how Tor{\textquoteright}s routing optimizations impact its ability to pro
month = mar,
pages = {169--176},
chapter = {169},
- abstract = {The use of elliptic curve cryptography (ECC) as a public-key cryptosystem for encryption is such that, given a message to encrypt, one attempts to map it to some point in the prime subgroup of the elliptic curve by systematically modifying the message in a deterministic manner. The applications typically used for ECC are key exchange, digital signatures or a hybrid encryption system (ECIES), all of which avoid this problem. In this paper we provide a deterministic method that guarantees that the map of a message to an elliptic curve point can be made without any modification. This paper provides a solution to the open problem posed in [7] concerning the creation of a deterministic method to map an arbitrary
+ abstract = {The use of elliptic curve cryptography (ECC) as a public-key cryptosystem for encryption is such that, given a message to encrypt, one attempts to map it to some point in the prime subgroup of the elliptic curve by systematically modifying the message in a deterministic manner. The applications typically used for ECC are key exchange, digital signatures or a hybrid encryption system (ECIES), all of which avoid this problem. In this paper we provide a deterministic method that guarantees that the map of a message to an elliptic curve point can be made without any modification. This paper provides a solution to the open problem posed in [7] concerning the creation of a deterministic method to map an arbitrary
message to an elliptic curve},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ijns-2009-v8-n2-p169-176.pdf},
author = {Brian King}
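
For contrast with the deterministic map the paper proposes, the usual probabilistic baseline it improves on is try-and-increment: append a counter to the message and retry until the curve equation has a square root. A sketch with made-up toy parameters, chosen so that p = 3 (mod 4) makes square roots a single exponentiation:

p, a, b = 10007, 3, 7                      # toy curve, not a secure one

def map_to_point(m, tries=256):
    # append a counter and retry until x^3 + ax + b is a square mod p
    for k in range(tries):
        x = (m * tries + k) % p
        rhs = (x**3 + a * x + b) % p
        y = pow(rhs, (p + 1) // 4, p)      # square root, since p = 3 (mod 4)
        if y * y % p == rhs:
            return x, y
    raise ValueError("no point found")     # fails with probability ~2^-tries

x, y = map_to_point(1234)
assert (y * y - (x**3 + a * x + b)) % p == 0
print(x, y)

The failure probability and the message modification are exactly what a deterministic map avoids.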
@@ -5472,8 +5459,8 @@ message to an elliptic curve},
pages = {343--360},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {Damg{\r a}rd et al. [11] showed a novel technique to convert a polynomial sharing of a secret a into sharings of the bits of a in constant rounds, which is called the bit-decomposition protocol. The bit-decomposition protocol is a very powerful tool because it enables bit-oriented operations even if shared secrets are given as elements of the field. However, the bit-decomposition protocol is relatively expensive.
-In this paper, we present a simplified bit-decomposition protocol by analyzing the original protocol. Moreover, we construct more efficient protocols for comparison, interval test and equality test of shared secrets without relying on the bit-decomposition protocol, though it seems essential to such bit-oriented operations. The key idea is that we do computation on secret a with c and r, where c = a + r, c is a revealed value, and r is a random bitwise-shared secret. The outputs of these protocols are also shared without being revealed.
+ abstract = {Damg{\r a}rd et al. [11] showed a novel technique to convert a polynomial sharing of a secret a into sharings of the bits of a in constant rounds, which is called the bit-decomposition protocol. The bit-decomposition protocol is a very powerful tool because it enables bit-oriented operations even if shared secrets are given as elements of the field. However, the bit-decomposition protocol is relatively expensive.
+In this paper, we present a simplified bit-decomposition protocol by analyzing the original protocol. Moreover, we construct more efficient protocols for comparison, interval test and equality test of shared secrets without relying on the bit-decomposition protocol, though it seems essential to such bit-oriented operations. The key idea is that we do computation on secret a with c and r, where c = a + r, c is a revealed value, and r is a random bitwise-shared secret. The outputs of these protocols are also shared without being revealed.
The realized protocols as well as the original protocol are constant-round and run with fewer communication rounds and less data communication than those of [11]. For example, the round complexities are reduced by a factor of approximately 3 to 10},
keywords = {Bitwise Sharing, Multiparty Computation, secret sharing},
isbn = {978-3-540-71676-1},
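
The mask-and-reveal idea quoted above (compute on a via a revealed c = a + r) can be illustrated on its own. A plain-Python toy with no actual secret sharing, just to show why revealing c leaks nothing about a while still letting the parties recover a's bits from the shared bits of r:

import random

L = 8                        # bit length of the secrets
M = 1 << L                   # arithmetic modulo 2^L

a = 173                      # stands in for the shared secret
r = random.randrange(M)      # in the protocol, r is jointly generated and
                             # its bits stay secret-shared among the parties
c = (a + r) % M              # c is uniformly random, so revealing it is safe

# From the public c and the (shared) bits of r, the parties can derive
# the bits of a, since a = c - r (mod 2^L); the protocol evaluates this
# subtraction as a shallow circuit over the shared bits instead of openly.
assert (c - r) % M == a
print([(c - r) % M >> i & 1 for i in range(L)])   # bits of a, LSB first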
@@ -5525,10 +5512,10 @@ The realized protocols as well as the original protocol are constant-round and r
publisher = {Australian Computer Society, Inc},
organization = {Australian Computer Society, Inc},
address = {Darlinghurst, Australia, Australia},
- abstract = {Recently, privacy issues have become important in data analysis, especially when data is horizontally partitioned over several parties. In data mining, the data is typically represented as attribute-vectors and, for many applications, the scalar (dot) product is one of the fundamental operations that is repeatedly used.
-
-In privacy-preserving data mining, data is distributed across several parties. The efficiency of secure scalar products is important not only because they can cause communication overhead, but also because dot product operations serve as one of the basic building blocks for many other secure protocols.
-
+ abstract = {Recently, privacy issues have become important in data analysis, especially when data is horizontally partitioned over several parties. In data mining, the data is typically represented as attribute-vectors and, for many applications, the scalar (dot) product is one of the fundamental operations that is repeatedly used.
+
+In privacy-preserving data mining, data is distributed across several parties. The efficiency of secure scalar products is important not only because they can cause communication overhead, but also because dot product operations serve as one of the basic building blocks for many other secure protocols.
+
Although several solutions exist in the relevant literature for this problem, the need for more efficient and more practical solutions still remains. In this paper, we present a very efficient and very practical secure scalar product protocol and compare it to the most common scalar product protocols. We not only show that our protocol is much more efficient than the existing ones, but also provide experimental results using a real-life dataset},
keywords = {privacy preserving data mining},
isbn = {978-1-920682-51-4},
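
As one concrete point of comparison (not necessarily the protocol of this paper), the classical commodity-server dot product of Du and Atallah shows the general shape of such protocols: a semi-trusted server distributes correlated randomness offline, and the parties then exchange only masked vectors. A sketch:

import random

P = 2**31 - 1                                # arithmetic modulo a prime

def rand_vec(n):
    return [random.randrange(P) for _ in range(n)]

def dot(u, v):
    return sum(a * b for a, b in zip(u, v)) % P

def commodity(n):
    # offline phase: the server never sees the parties' vectors
    Ra, Rb = rand_vec(n), rand_vec(n)
    ra = random.randrange(P)
    rb = (dot(Ra, Rb) - ra) % P              # so that ra + rb = Ra . Rb
    return (Ra, ra), (Rb, rb)

def secure_dot(x, y):
    (Ra, ra), (Rb, rb) = commodity(len(x))
    x_masked = [(xi + ai) % P for xi, ai in zip(x, Ra)]   # Alice -> Bob
    y_masked = [(yi + bi) % P for yi, bi in zip(y, Rb)]   # Bob -> Alice
    share_b = (dot(x_masked, y) + rb) % P    # = x.y + Ra.y + rb
    share_a = (ra - dot(y_masked, Ra)) % P   # = ra - y.Ra - Rb.Ra
    return (share_a + share_b) % P           # masks cancel, leaving x.y

x, y = [1, 2, 3], [4, 5, 6]
assert secure_dot(x, y) == dot(x, y)         # 32
print(secure_dot(x, y))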
@@ -5556,12 +5543,12 @@ Although several solutions exist in the relevant literature for this problem, th
publisher = {Morgan Kaufmann Publishers Inc},
organization = {Morgan Kaufmann Publishers Inc},
address = {Hyderabad, India},
- abstract = {Fully decentralized algorithms for distributed constraint optimization often require excessive amounts of communication when applied to complex problems. The OptAPO algorithm of [Mailler and Lesser, 2004] uses a strategy of partial centralization to mitigate this problem.
-
-We introduce PC-DPOP, a new partial centralization technique, based on the DPOP algorithm of [Petcu and Faltings, 2005]. PC-DPOP provides better control over what parts of the problem are centralized and allows this centralization to be optimal with respect to the chosen communication structure.
-
-Unlike OptAPO, PC-DPOP allows for a priori, exact predictions about privacy loss, communication, memory and computational requirements on all nodes and links in the network. Upper bounds on communication and memory requirements can be specified.
-
+ abstract = {Fully decentralized algorithms for distributed constraint optimization often require excessive amounts of communication when applied to complex problems. The OptAPO algorithm of [Mailler and Lesser, 2004] uses a strategy of partial centralization to mitigate this problem.
+
+We introduce PC-DPOP, a new partial centralization technique, based on the DPOP algorithm of [Petcu and Faltings, 2005]. PC-DPOP provides better control over what parts of the problem are centralized and allows this centralization to be optimal with respect to the chosen communication structure.
+
+Unlike OptAPO, PC-DPOP allows for a priori, exact predictions about privacy loss, communication, memory and computational requirements on all nodes and links in the network. Upper bounds on communication and memory requirements can be specified.
+
We also report strong efficiency gains over OptAPO in experiments on three problem domains},
keywords = {algorithms, distributed constraint optimization, DPOP, OptAPO, partial centralization technique},
www_section = {http://dl.acm.org/citation.cfm?id=1625275.1625301},
@@ -5590,8 +5577,8 @@ We also report strong efficiency gains over OptAPO in experiments on three probl
pages = {330--342},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {Yao{\textquoteright}s classical millionaires{\textquoteright} problem is about securely determining whether x > y, given two input values x,y, which are held as private inputs by two parties, respectively. The output x > y becomes known to both parties.
-In this paper, we consider a variant of Yao{\textquoteright}s problem in which the inputs x,y as well as the output bit x > y are encrypted. Referring to the framework of secure n-party computation based on threshold homomorphic cryptosystems as put forth by Cramer, Damg{\r a}rd, and Nielsen at Eurocrypt 2001, we develop solutions for integer comparison, which take as input two lists of encrypted bits representing x and y, respectively, and produce an encrypted bit indicating whether x > y as output. Secure integer comparison is an important building block for applications such as secure auctions.
+ abstract = {Yao{\textquoteright}s classical millionaires{\textquoteright} problem is about securely determining whether x > y, given two input values x,y, which are held as private inputs by two parties, respectively. The output x > y becomes known to both parties.
+In this paper, we consider a variant of Yao{\textquoteright}s problem in which the inputs x,y as well as the output bit x > y are encrypted. Referring to the framework of secure n-party computation based on threshold homomorphic cryptosystems as put forth by Cramer, Damg{\r a}rd, and Nielsen at Eurocrypt 2001, we develop solutions for integer comparison, which take as input two lists of encrypted bits representing x and y, respectively, and produce an encrypted bit indicating whether x > y as output. Secure integer comparison is an important building block for applications such as secure auctions.
In this paper, our focus is on the two-party case, although most of our results extend to the multi-party case. We propose new logarithmic-round and constant-round protocols for this setting, which achieve simultaneously very low communication and computational complexities. We analyze the protocols in detail and show that our solutions compare favorably to other known solutions},
keywords = {homomorphic encryption, Millionaires{\textquoteright} problem, secure multi-party computation},
isbn = {978-3-540-71676-1},
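
The gate-level structure such comparison protocols evaluate is the standard bitwise greater-than circuit: x > y iff some bit position has x_i = 1, y_i = 0 with all more significant bits equal. Here it is checked on plaintext bits as a sanity check; in the protocol each AND/NOT is computed over encrypted bits:

def gt(x_bits, y_bits):
    # bits are most-significant first; returns 1 iff x > y
    result = 0
    for i in range(len(x_bits)):
        equal_above = all(x_bits[j] == y_bits[j] for j in range(i))
        result |= x_bits[i] & (1 - y_bits[i]) & int(equal_above)
    return result

def bits(v, l):
    # most-significant bit first
    return [(v >> (l - 1 - i)) & 1 for i in range(l)]

for x in range(16):
    for y in range(16):
        assert gt(bits(x, 4), bits(y, 4)) == int(x > y)
print("bitwise comparison matches x > y")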
@@ -5617,8 +5604,8 @@ In this paper, our focus is on the two-party case, although most of our results
publisher = {IEEE Press},
organization = {IEEE Press},
address = {Anchorage, Alaska, USA},
- abstract = {The success of file swarming mechanisms such as BitTorrent has motivated a new approach for scalable streaming of live content that we call mesh-based Peer-to-Peer (P2P) streaming. In this approach, participating end-systems (or peers) form a randomly connected mesh and incorporate swarming content delivery to stream live content. Despite the growing popularity of this approach, neither the fundamental design tradeoffs nor the basic performance bottlenecks in mesh-based P2P streaming are well understood.
-
+ abstract = {The success of file swarming mechanisms such as BitTorrent has motivated a new approach for scalable streaming of live content that we call mesh-based Peer-to-Peer (P2P) streaming. In this approach, participating end-systems (or peers) form a randomly connected mesh and incorporate swarming content delivery to stream live content. Despite the growing popularity of this approach, neither the fundamental design tradeoffs nor the basic performance bottlenecks in mesh-based P2P streaming are well understood.
+
In this paper, we follow a performance-driven approach to design PRIME, a scalable mesh-based P2P streaming mechanism for live content. The main design goal of PRIME is to minimize two performance bottlenecks, namely bandwidth bottleneck and content bottleneck. We show that the global pattern of delivery for each segment of live content should consist of a diffusion phase which is followed by a swarming phase. This leads to effective utilization of available resources to accommodate scalability and also minimizes content bottleneck. Using packet level simulations, we carefully examine the impact of overlay connectivity, packet scheduling scheme at individual peers and source behavior on the overall performance of the system. Our results reveal fundamental design tradeoffs of mesh-based P2P streaming for live content},
keywords = {communication network, computer networks, Internet, multimedia communication, multimedia systems},
issn = {1063-6692},
@@ -5672,12 +5659,12 @@ In this paper, we follow a performance-driven approach to design PRIME, a scalab
month = {October},
school = {Laboratoire d{\textquoteright}Informatique (LIX), {\'E}cole Polytechnique, Paris},
type = {phd},
- abstract = {As the number of Internet activities increases, there is a growing amount of personal information about the users that is transferred using public electronic means, making it feasible to collect a huge amount of information about a person. As a consequence, the need for mechanisms to protect such information is compelling. In this thesis, we study security protocols with an emphasis on the property of anonymity and we propose methods to express and verify this property.
-
-Anonymity protocols often use randomization to introduce noise, thus limiting the inference power of a malicious observer. We consider a probabilistic framework in which a protocol is described by its set of anonymous information, observable information and the conditional probability of observing the latter given the former. In this framework we express two anonymity properties, namely strong anonymity and probable innocence.
-
-Then we aim at quantitative definitions of anonymity. We view protocols as noisy channels in the information-theoretic sense and we express their degree of anonymity as the converse of channel capacity. We apply this definition to two known anonymity protocols. We develop a monotonicity principle for the capacity, and use it to show a number of results for binary channels in the context of algebraic information theory. We then study the probability of error for the attacker in the context of Bayesian inference, showing that it is a piecewise linear function and using this fact to improve known bounds from the literature.
-
+ abstract = {As the number of Internet activities increases, there is a growing amount of personal information about the users that is transferred using public electronic means, making it feasible to collect a huge amount of information about a person. As a consequence, the need for mechanisms to protect such information is compelling. In this thesis, we study security protocols with an emphasis on the property of anonymity and we propose methods to express and verify this property.
+
+Anonymity protocols often use randomization to introduce noise, thus limiting the inference power of a malicious observer. We consider a probabilistic framework in which a protocol is described by its set of anonymous information, observable information and the conditional probability of observing the latter given the former. In this framework we express two anonymity properties, namely strong anonymity and probable innocence.
+
+Then we aim at quantitative definitions of anonymity. We view protocols as noisy channels in the information-theoretic sense and we express their degree of anonymity as the converse of channel capacity. We apply this definition to two known anonymity protocols. We develop a monotonicity principle for the capacity, and use it to show a number of results for binary channels in the context of algebraic information theory. We then study the probability of error for the attacker in the context of Bayesian inference, showing that it is a piecewise linear function and using this fact to improve known bounds from the literature.
+
Finally, we study a problem that arises when we combine probabilities with nondeterminism, where the scheduler is too powerful even for trivially secure protocols. We propose a process calculus that allows expressing restrictions on the scheduler, and we use it in the analysis of an anonymity protocol and a contract-signing protocol},
www_section = {http://www.win.tue.nl/~kostas/these/},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/kostas-thesis.pdf},
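
The "degree of anonymity as the converse of channel capacity" view described above is computable for small systems. A sketch using the standard Blahut-Arimoto iteration, where each row of the matrix is P(observation | anonymous user); a capacity near zero means the observations say almost nothing about the user:

import math

def capacity(W, iters=2000):
    # W: channel matrix, rows = users, columns = observables
    n = len(W)
    p = [1.0 / n] * n                 # input distribution, refined iteratively
    for _ in range(iters):
        q = [sum(p[i] * W[i][j] for i in range(n)) for j in range(len(W[0]))]
        r = [p[i] * math.exp(sum(W[i][j] * math.log(W[i][j] / q[j])
                                 for j in range(len(q)) if W[i][j] > 0))
             for i in range(n)]
        s = sum(r)
        p = [x / s for x in r]
    q = [sum(p[i] * W[i][j] for i in range(n)) for j in range(len(W[0]))]
    return sum(p[i] * W[i][j] * math.log2(W[i][j] / q[j])
               for i in range(n) for j in range(len(q)) if W[i][j] > 0)

# perfectly anonymous: observations independent of the user
print(capacity([[0.5, 0.5], [0.5, 0.5]]))   # ~0 bits (strong anonymity)
# leaky: observations correlate with the user
print(capacity([[0.9, 0.1], [0.2, 0.8]]))   # > 0 bits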
@@ -5714,8 +5701,8 @@ Finally we study a problem that arises when we combine probabilities with nondet
school = {Technische Universit{\"a}t M{\"u}nchen},
type = {Diplomarbeit},
address = {Munich, Germany},
- abstract = {Unfortunately, only a few of the known "Distributed Hash Table"-based overlay networks take proximity in terms of latency into account, so query routing can incur high latency when very distant hops are used, for instance hops from one continent to another and back, even though the target node may be located close to the requesting node. Such cases increase query latency to a great extent and are responsible for performance bottlenecks in query routing.
-There exist two main strategies to reduce latency in the query routing process: Proximity Neighbor Selection (PNS) and Proximity Route Selection. As a new PNS proposal for the IGOR overlay network, Merivaldi is developed. Merivaldi combines two basic ideas: the first is the Meridian framework and its Closest-Node-Discovery without synthetic coordinates; the second is Vivaldi, a distributed algorithm for predicting Internet latency between arbitrary Internet hosts. Merivaldi is quite similar to Meridian, but differs in using no direct round-trip time measurements, as Meridian does, to obtain latency characteristics between hosts; instead, it obtains the latency characteristics of nodes from the latency prediction derived from the Vivaldi coordinates. A Merivaldi node forms exponentially growing latency rings, i.e., the rings correspond to latency distances from the Merivaldi node itself. Node references, obtained through a special protocol, are inserted into these rings according to their latency characteristics. A Merivaldi node finds latency-closest nodes by periodically querying its ring members for closer nodes. If a ring member finds a closer node, the query is forwarded to that node until no closer one can be found; the closest node on this path reports itself to the Merivaldi node.
+ abstract = {Unfortunately, only a few of the known "Distributed Hash Table"-based overlay networks take proximity in terms of latency into account, so query routing can incur high latency when very distant hops are used, for instance hops from one continent to another and back, even though the target node may be located close to the requesting node. Such cases increase query latency to a great extent and are responsible for performance bottlenecks in query routing.
+There exist two main strategies to reduce latency in the query routing process: Proximity Neighbor Selection (PNS) and Proximity Route Selection. As a new PNS proposal for the IGOR overlay network, Merivaldi is developed. Merivaldi combines two basic ideas: the first is the Meridian framework and its Closest-Node-Discovery without synthetic coordinates; the second is Vivaldi, a distributed algorithm for predicting Internet latency between arbitrary Internet hosts. Merivaldi is quite similar to Meridian, but differs in using no direct round-trip time measurements, as Meridian does, to obtain latency characteristics between hosts; instead, it obtains the latency characteristics of nodes from the latency prediction derived from the Vivaldi coordinates. A Merivaldi node forms exponentially growing latency rings, i.e., the rings correspond to latency distances from the Merivaldi node itself. Node references, obtained through a special protocol, are inserted into these rings according to their latency characteristics. A Merivaldi node finds latency-closest nodes by periodically querying its ring members for closer nodes. If a ring member finds a closer node, the query is forwarded to that node until no closer one can be found; the closest node on this path reports itself to the Merivaldi node.
Analysis shows that Merivaldi places only a modest burden on the network: it uses at most O(log N) CND hops to recognize a closest node, where N is the number of nodes, and empirical tests confirm this analysis. It is shown that Merivaldi{\textquoteright}s Vivaldi component works with high quality with the PING message type used},
keywords = {IGOR, neighbor selection, overlay-network, proximity route selection},
www_section = {http://i30www.ira.uka.de/teaching/theses/pasttheses/},
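www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/}

Since Merivaldi leans on Vivaldi's latency prediction, a simplified version of the Vivaldi update rule is useful for intuition. The constants and error weighting follow the published Vivaldi algorithm; the surrounding multi-node simulation is our own minimal setup:

import math, random

CC, CE = 0.25, 0.25                            # tuning constants from Vivaldi

def vivaldi_update(xi, ei, xj, ej, rtt):
    # nudge node i's coordinate xi so distance to xj tracks measured rtt
    dist = math.dist(xi, xj) or 1e-9
    w = ei / (ei + ej)                         # weight by relative error
    es = abs(dist - rtt) / rtt                 # relative error of this sample
    ei_new = es * CE * w + ei * (1 - CE * w)   # smoothed local error estimate
    delta = CC * w * (rtt - dist)              # spring displacement
    unit = [(a - b) / dist for a, b in zip(xi, xj)]
    return [a + delta * u for a, u in zip(xi, unit)], ei_new

xi, ei = [random.random(), random.random()], 1.0
for _ in range(200):                           # one fixed peer at the origin
    xi, ei = vivaldi_update(xi, ei, [0.0, 0.0], 1.0, 50.0)
print(math.dist(xi, [0.0, 0.0]))               # ~50, matching the target rtt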
@@ -5742,12 +5729,12 @@ Exemplary analysis show that Merivaldi means only a modest burden for the networ
pages = {305--314},
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
- abstract = {In many networks, such as mobile ad-hoc networks and friend-to-friend overlay networks, direct communication between nodes is limited to specific neighbors. Often these networks have a small-world topology; while short paths exist between any pair of nodes in small-world networks, it is non-trivial to determine such paths with a distributed algorithm. Recently, Clarke and Sandberg proposed the first decentralized routing algorithm that achieves efficient routing in such small-world networks.
-
-This paper is the first independent security analysis of Clarke and Sandberg{\textquoteright}s routing algorithm. We show that a relatively weak participating adversary can render the overlay ineffective without being detected, resulting in significant data loss due to the resulting load imbalance. We have measured the impact of the attack in a testbed of 800 nodes using minor modifications to Clarke and Sandberg{\textquoteright}s implementation of their routing algorithm in Freenet. Our experiments show that the attack is highly effective, allowing a small number of malicious nodes to cause rapid loss of data on the entire network.
-
+ abstract = {In many networks, such as mobile ad-hoc networks and friend-to-friend overlay networks, direct communication between nodes is limited to specific neighbors. Often these networks have a small-world topology; while short paths exist between any pair of nodes in small-world networks, it is non-trivial to determine such paths with a distributed algorithm. Recently, Clarke and Sandberg proposed the first decentralized routing algorithm that achieves efficient routing in such small-world networks.
+
+This paper is the first independent security analysis of Clarke and Sandberg{\textquoteright}s routing algorithm. We show that a relatively weak participating adversary can render the overlay ineffective without being detected, resulting in significant data loss due to the resulting load imbalance. We have measured the impact of the attack in a testbed of 800 nodes using minor modifications to Clarke and Sandberg{\textquoteright}s implementation of their routing algorithm in Freenet. Our experiments show that the attack is highly effective, allowing a small number of malicious nodes to cause rapid loss of data on the entire network.
+
We also discuss various proposed countermeasures designed to detect, thwart or limit the attack. While we were unable to find effective countermeasures, we hope that the presented analysis will be a first step towards the design of secure distributed routing algorithms for restricted-route topologies},
keywords = {denial-of-service, Freenet, installation, routing},
www_section = {http://grothoff.org/christian/pitchblack.pdf},
@@ -5776,10 +5763,10 @@ We also discuss various proposed countermeasures designed to detect, thwart or l
pages = {0--74},
school = {Technische Universit{\"a}t M{\"u}nchen},
address = {Munich, Germany},
- abstract = {Distributed file systems have been a topic of interest for a long time and there are many file systems that are distributed in one way or another. However, most distributed file systems are only reasonably usable within a local network of computers, and some main tasks are still delegated to a very small number of servers.
-Today, with the advent of peer-to-peer technology, distributed file systems that work on top of peer-to-peer systems can be built. These systems can be built with no or far fewer centralised components and are usable on a global scale.
-The System Architecture Group at the University of Karlsruhe in Germany has developed such a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information.
-One problem with this approach is that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way.
+ abstract = {Distributed file systems have been a topic of interest for a long time and there are many file systems that are distributed in one way or another. However, most distributed file systems are only reasonably usable within a local network of computers, and some main tasks are still delegated to a very small number of servers.
+Today, with the advent of peer-to-peer technology, distributed file systems that work on top of peer-to-peer systems can be built. These systems can be built with no or far fewer centralised components and are usable on a global scale.
+The System Architecture Group at the University of Karlsruhe in Germany has developed such a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information.
+One problem with this approach is that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way.
Usually the strategy to solve this type of problem is an encrypted multicast. This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al},
keywords = {distributed file system, distributed hash table, peer-to-peer networking, store information},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf},
@@ -5941,8 +5928,8 @@ Usually the strategy to solve this type of problem is an encrypted multicast. Th
year = {2007},
publisher = {USENIX Association Berkeley, CA, USA},
organization = {USENIX Association Berkeley, CA, USA},
- abstract = {This paper investigates the problem of designing anonymity networks that meet application-specific performance and security constraints. We argue that existing anonymity networks take a narrow view of performance by considering only the strength of the offered anonymity. However, real-world applications impose a myriad of communication requirements, including end-to-end bandwidth and latency, trustworthiness of intermediary routers, and network jitter.
-
+ abstract = {This paper investigates the problem of designing anonymity networks that meet application-specific performance and security constraints. We argue that existing anonymity networks take a narrow view of performance by considering only the strength of the offered anonymity. However, real-world applications impose a myriad of communication requirements, including end-to-end bandwidth and latency, trustworthiness of intermediary routers, and network jitter.
+
We pose a grand challenge for anonymity: the development of a network architecture that enables applications to customize routes that tradeoff between anonymity and performance. Towards this challenge, we present the Application-Aware Anonymity (A3) routing service. We envision that A3 will serve as a powerful and flexible anonymous communications layer that will spur the future development of anonymity services},
keywords = {anonymity, routing},
www_section = {http://portal.acm.org/citation.cfm?id=1361423},
@@ -6051,8 +6038,8 @@ We pose a grand challenge for anonymity: the development of a network architectu
pages = {89--100},
publisher = {ACM},
address = {New York, NY, USA},
- abstract = {Dynamic binary instrumentation (DBI) frameworks make it easy to build dynamic binary analysis (DBA) tools such as checkers and profilers. Much of the focus on DBI frameworks has been on performance; little attention has been paid to their capabilities. As a result, we believe the potential of DBI has not been fully exploited.
-
+ abstract = {Dynamic binary instrumentation (DBI) frameworks make it easy to build dynamic binary analysis (DBA) tools such as checkers and profilers. Much of the focus on DBI frameworks has been on performance; little attention has been paid to their capabilities. As a result, we believe the potential of DBI has not been fully exploited.
+
In this paper we describe Valgrind, a DBI framework designed for building heavyweight DBA tools. We focus on its unique support for shadow values-a powerful but previously little-studied and difficult-to-implement DBA technique, which requires a tool to shadow every register and memory value with another value that describes it. This support accounts for several crucial design features that distinguish Valgrind from other DBI frameworks. Because of these features, lightweight tools built with Valgrind run comparatively slowly, but Valgrind can be used to build more interesting, heavyweight tools that are difficult or impossible to build with other DBI frameworks such as Pin and DynamoRIO},
keywords = {dynamic binary instrumentation},
issn = {0362-1340},
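
The shadow-value technique described above is easiest to see in a toy model: every stored value carries a parallel metadata value (here a single taint bit), and instrumented operations keep the two in sync. This models only the bookkeeping, not Valgrind's actual instrumentation machinery:

# toy shadow memory: real DBI frameworks interpose this on machine code
memory, shadow = {}, {}

def store(addr, value, tainted=False):
    memory[addr] = value
    shadow[addr] = tainted              # metadata travels with the data

def load(addr):
    return memory[addr], shadow[addr]

def add(dst, a, b):
    va, ta = load(a)
    vb, tb = load(b)
    store(dst, va + vb, ta or tb)       # taint propagates through operations

store(0x10, 41)                         # trusted constant
store(0x14, 1, tainted=True)            # e.g. attacker-controlled input
add(0x18, 0x10, 0x14)
print(load(0x18))                       # (42, True): the result is tainted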
@@ -6135,7 +6122,7 @@ In this paper we describe Valgrind, a DBI framework designed for building heavyw
year = {2006},
month = {June},
address = {Cambridge, UK},
- abstract = {A growing field of literature is studying how usability impacts security [4]. One class of security software is anonymizing networks---overlay networks on the Internet that provide privacy by letting users transact (for example, fetch a web page or send an email) without revealing their communication partners.
+ abstract = {A growing field of literature is studying how usability impacts security [4]. One class of security software is anonymizing networks---overlay networks on the Internet that provide privacy by letting users transact (for example, fetch a web page or send an email) without revealing their communication partners.
In this position paper we focus on the network effects of usability on privacy and security: usability is a factor as before, but the size of the user base also becomes a factor. We show that in anonymizing networks, even if you were smart enough and had enough time to use every system perfectly, you would nevertheless be right to choose your system based in part on its usability for other users},
keywords = {anonymity, privacy},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.510},
@@ -6149,7 +6136,7 @@ In this position paper we focus on the network effects of usability on privacy a
volume = {4661/2007},
year = {2006},
pages = {281--300},
- abstract = {We propose a framework in which anonymity protocols are interpreted as particular kinds of channels, and the degree of anonymity provided by the protocol as the converse of the channel{\textquoteright}s capacity. We also investigate how the adversary can test the system to try to infer the user{\textquoteright}s identity, and we study how his probability of success depends on the characteristics of the channel. We then illustrate how various notions of anonymity can be expressed in this framework, and show the relation with some definitions of probabilistic anonymity in the literature.
+ abstract = {We propose a framework in which anonymity protocols are interpreted as particular kinds of channels, and the degree of anonymity provided by the protocol as the converse of the channel{\textquoteright}s capacity. We also investigate how the adversary can test the system to try to infer the user{\textquoteright}s identity, and we study how his probability of success depends on the characteristics of the channel. We then illustrate how various notions of anonymity can be expressed in this framework, and show the relation with some definitions of probabilistic anonymity in the literature.
This work has been partially supported by the INRIA DREI {\'E}quipe Associ{\'e}e PRINTEMPS. The work of Konstantinos Chatzikokolakis and Catuscia Palamidessi has been also supported by the INRIA ARC project ProNoBiS},
keywords = {anonymity},
isbn = {978-3-540-75333-9},
@@ -6186,7 +6173,7 @@ This work has been partially supported by the INRIA DREI {\'E}quipe Associ{\'e}e
publisher = {Springer},
organization = {Springer},
address = {Cambridge, UK},
- abstract = {Currently fielded anonymous communication systems either introduce too much delay and thus have few users and little security, or have many users but too little delay to provide protection against large attackers. By combining the user bases into the same network, and ensuring that all traffic is mixed together, we hope to lower delay and improve anonymity for both sets of users.
+ abstract = {Currently fielded anonymous communication systems either introduce too much delay and thus have few users and little security, or have many users but too little delay to provide protection against large attackers. By combining the user bases into the same network, and ensuring that all traffic is mixed together, we hope to lower delay and improve anonymity for both sets of users.
Alpha-mixing is an approach that can be added to traditional batching strategies to let senders specify for each message whether they prefer security or speed. Here we describe how to add alpha-mixing to various mix designs, and show that mix networks with this feature can provide increased anonymity for all senders in the network. Along the way we encounter subtle issues to do with the attacker{\textquoteright}s knowledge of the security parameters of the users},
keywords = {anonymity},
isbn = {978-3-540-68790-0},
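
A minimal sketch of the alpha-mixing batching idea above, under our own simplifications (one flush per round, no cover traffic or pool thresholds): each message waits its sender-chosen alpha rounds in the pool before it may leave, so alpha = 0 buys speed and larger alphas buy more mixing:

import random

class AlphaMix:
    def __init__(self):
        self.pool = []                    # entries are [alpha, message]

    def submit(self, message, alpha):
        self.pool.append([alpha, message])

    def tick(self):
        # one mix round: decrement every alpha, flush those that expire
        for entry in self.pool:
            entry[0] -= 1
        out = [m for a, m in self.pool if a < 0]
        self.pool = [e for e in self.pool if e[0] >= 0]
        random.shuffle(out)               # the batch leaves in random order
        return out

mix = AlphaMix()
mix.submit("fast", 0)
mix.submit("careful", 2)
print(mix.tick())   # ['fast']
print(mix.tick())   # []
print(mix.tick())   # ['careful']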
@@ -6318,7 +6305,7 @@ Alpha-mixing is an approach that can be added to traditional batching strategies
series = {Lecture Notes in Computer Science},
volume = {Volume 4124/2006},
year = {2006},
- abstract = {Our recently proposed scalable source routing (SSR) protocol combines source routing in the physical network with Chord-like routing in the virtual ring that is formed by the address space. Thereby, SSR provides self-organized routing in large unstructured networks of resource-limited devices. Its ability to quickly adapt to changes in the network topology makes it suitable not only for sensor-actuator networks but also for mobile ad-hoc networks. Moreover, SSR directly provides the key-based routing semantics, thereby making it an efficient basis for the scalable implementation of self-organizing, fully decentralized applications.
+ abstract = {Our recently proposed scalable source routing (SSR) protocol combines source routing in the physical network with Chord-like routing in the virtual ring that is formed by the address space. Thereby, SSR provides self-organized routing in large unstructured networks of resource-limited devices. Its ability to quickly adapt to changes in the network topology makes it suitable not only for sensor-actuator networks but also for mobile ad-hoc networks. Moreover, SSR directly provides the key-based routing semantics, thereby making it an efficient basis for the scalable implementation of self-organizing, fully decentralized applications.
In this paper we review SSR{\textquoteright}s self-organizing features and demonstrate how the combination of virtual and physical structures leads to the emergence of stability and efficiency. In particular, we focus on SSR{\textquoteright}s resistance against node churn. Following the principle of combining virtual and physical structures, we propose an extension that stabilizes SSR in the face of heavy node churn. Simulations demonstrate the effectiveness of this extension},
keywords = {Chord, scalable source routing, self-organization},
issn = {978-3-540-37658-3},
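
The Chord-like half of SSR is greedy forwarding in the virtual ring formed by the address space. A sketch of the per-hop decision only (addresses and the neighbour set are made up for illustration; SSR additionally source-routes in the physical network):

N = 2**16                                    # assumed identifier space size

def ring_dist(a, b):
    # clockwise distance from a to b on the virtual ring
    return (b - a) % N

def next_hop(neighbours, dest):
    # forward to the neighbour with the least remaining ring distance
    return min(neighbours, key=lambda n: ring_dist(n, dest))

print(next_hop([100, 9000, 60000], dest=8500))   # 100: closest without overshooting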
@@ -6483,16 +6470,16 @@ In this paper we review SSR{\textquoteright}s self-organizing features and demon
school = {KTH/Royal Institute of Technology},
type = {Doctoral},
address = {Stockholm},
- abstract = {This dissertation presents algorithms for data structures called distributed hash tables (DHTs) or structured overlay networks, which are used to build scalable self-managing distributed systems. The provided algorithms guarantee lookup consistency in the presence of dynamism: they guarantee consistent lookup results in the presence of nodes joining and leaving. Similarly, the algorithms guarantee that routing never fails while nodes join and leave. Previous algorithms for lookup consistency either suffer from starvation, do not work in the presence of failures, or lack proof of correctness.
-
-Several group communication algorithms for structured overlay networks are presented. We provide an overlay broadcast algorithm, which unlike previous algorithms avoids redundant messages, reaching all nodes in O(log n) time, while using O(n) messages, where n is the number of nodes in the system. The broadcast algorithm is used to build overlay multicast.
-
-We introduce bulk operations, which enable a node to efficiently make multiple lookups or send a message to all nodes in a specified set of identifiers. The algorithm ensures that all specified nodes are reached in O(log n) time, sending at most O(log n) messages per node, regardless of the input size of the bulk operation. Moreover, the algorithm avoids sending redundant messages. Previous approaches required multiple lookups, which consume more messages and can render the initiator a bottleneck. Our algorithms are used in DHT-based storage systems, where nodes can do thousands of lookups to fetch large files. We use the bulk operation algorithm to construct a pseudo-reliable broadcast algorithm. Bulk operations can also be used to implement efficient range queries.
-
-Finally, we describe a novel way to place replicas in a DHT, called symmetric replication, that enables parallel recursive lookups. Parallel lookups are known to reduce latencies. However, costly iterative lookups have previously been used to do parallel lookups. Moreover, with symmetric replication, joins or leaves only require exchanging O(1) messages, while other schemes require at least log(f) messages for a replication degree of f.
-
+ abstract = {This dissertation presents algorithms for data structures called distributed hash tables (DHTs) or structured overlay networks, which are used to build scalable self-managing distributed systems. The provided algorithms guarantee lookup consistency in the presence of dynamism: they guarantee consistent lookup results in the presence of nodes joining and leaving. Similarly, the algorithms guarantee that routing never fails while nodes join and leave. Previous algorithms for lookup consistency either suffer from starvation, do not work in the presence of failures, or lack proof of correctness.
+
+Several group communication algorithms for structured overlay networks are presented. We provide an overlay broadcast algorithm, which unlike previous algorithms avoids redundant messages, reaching all nodes in O(log n) time, while using O(n) messages, where n is the number of nodes in the system. The broadcast algorithm is used to build overlay multicast.
+
+We introduce bulk operations, which enable a node to efficiently make multiple lookups or send a message to all nodes in a specified set of identifiers. The algorithm ensures that all specified nodes are reached in O(log n) time, sending at most O(log n) messages per node, regardless of the input size of the bulk operation. Moreover, the algorithm avoids sending redundant messages. Previous approaches required multiple lookups, which consume more messages and can render the initiator a bottleneck. Our algorithms are used in DHT-based storage systems, where nodes can do thousands of lookups to fetch large files. We use the bulk operation algorithm to construct a pseudo-reliable broadcast algorithm. Bulk operations can also be used to implement efficient range queries.
+
+Finally, we describe a novel way to place replicas in a DHT, called symmetric replication, that enables parallel recursive lookups. Parallel lookups are known to reduce latencies. However, costly iterative lookups have previously been used to do parallel lookups. Moreover, with symmetric replication, joins or leaves only require exchanging O(1) messages, while other schemes require at least log(f) messages for a replication degree of f.
+
The algorithms have been implemented in a middleware called the Distributed k-ary System (DKS), which is briefly described},
keywords = {distributed hash table, distributed k-ary system, DKS},
www_section = {http://eprints.sics.se/516/},
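
The symmetric replication scheme mentioned above places a key's f replicas at evenly spaced positions on the identifier ring, so any node can derive all replica identifiers locally and launch the f lookups in parallel. A sketch:

def replica_ids(key, f, N):
    # N = identifier space size, f = replication degree; the scheme
    # partitions the ring into f symmetric classes
    assert N % f == 0
    return [(key + i * (N // f)) % N for i in range(f)]

print(replica_ids(key=42, f=4, N=2**10))   # [42, 298, 554, 810]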
@@ -7105,10 +7092,10 @@ The algorithms have been implemented in a middleware called the Distributed k-ar
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {Berlin, Heidelberg},
- abstract = {In this work we provide efficient distributed protocols for generating shares of random noise, secure against malicious participants. The purpose of the noise generation is to create a distributed implementation of the privacy-preserving statistical databases described in recent papers [14, 4, 13]. In these databases, privacy is obtained by perturbing the true answer to a database query by the addition of a small amount of Gaussian or exponentially distributed random noise. The computational power of even a simple form of these databases, when the query is just of the form of a sum over all rows {\textquoteright}i{\textquoteright} in the database of a function f applied to the data in row i, has been demonstrated in [4]. A distributed implementation eliminates the need for a trusted database administrator. The results for noise generation are of independent interest. The generation of Gaussian noise introduces a technique for distributing shares of many unbiased coins with fewer executions of verifiable secret sharing than would be needed using previous approaches (reduced by a factor of n). The generation of exponentially distributed noise uses
+ abstract = {In this work we provide efficient distributed protocols for generating shares of random noise, secure against malicious participants. The purpose of the noise generation is to create a distributed implementation of the privacy-preserving statistical databases described in recent papers [14, 4, 13]. In these databases, privacy is obtained by perturbing the true answer to a database query by the addition of a small amount of Gaussian or exponentially distributed random noise. The computational power of even a simple form of these databases, when the query is just of the form of a sum over all rows {\textquoteright}i{\textquoteright} in the database of a function f applied to the data in row i, has been demonstrated in [4]. A distributed implementation eliminates the need for a trusted database administrator. The results for noise generation are of independent interest. The generation of Gaussian noise introduces a technique for distributing shares of many unbiased coins with fewer executions of verifiable secret sharing than would be needed using previous approaches (reduced by a factor of n). The generation of exponentially distributed noise uses
two shallow circuits: one for generating many arbitrarily but identically biased coins at an amortized cost of two unbiased random bits apiece, independent of the bias, and the other to combine bits of appropriate biases to obtain an exponential distribution},
isbn = {3-540-34546-9, 978-3-540-34546-6},
doi = {10.1007/11761679_29},
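
For orientation, the query model the abstract describes: the database releases the true answer, the sum over all rows i of f(row i), perturbed by a small amount of Gaussian or exponentially distributed noise. The Python sketch below shows only this centralized perturbation; the paper's actual contribution, generating shares of such noise among mutually distrusting parties, is not reproduced, and the two-sided exponential branch is our simplification.

# Centralized analogue of the perturbation whose *distributed* generation
# the paper studies: answer sum_i f(row_i) plus a small amount of noise.
# The share-based noise generation itself is not sketched here.
import random

def noisy_sum(rows, f, scale=1.0, gaussian=True):
    true_answer = sum(f(r) for r in rows)
    if gaussian:
        noise = random.gauss(0.0, scale)
    else:
        # two-sided exponential noise as the difference of two exponentials
        # (a simplification of the paper's exponentially distributed noise)
        noise = random.expovariate(1.0 / scale) - random.expovariate(1.0 / scale)
    return true_answer + noise

rows = [{"age": 34}, {"age": 51}, {"age": 29}]
print(noisy_sum(rows, f=lambda r: 1 if r["age"] > 30 else 0))
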
@@ -7384,8 +7371,8 @@ two shallow circuits: one for generating many arbitrarily but identically biased
year = {2006},
note = {only published on CD},
pages = {1--52},
- abstract = {Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to share their {\textquotedblleft}private{\textquotedblright} information due to fear of adverse impact of information leakage.
-This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demandsignal (e.g., promotions) and cost information to one another. In our collaborative CPFR) scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailers and the suppliers observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for
+ abstract = {Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to sharing their {\textquotedblleft}private{\textquotedblright} information due to fear of the adverse impact of information leakage.
+This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demand-signal (e.g., promotions) and cost information to one another. In our collaborative CPFR scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailer{\textquoteright}s and the supplier{\textquoteright}s observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for
collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the others future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation},
keywords = {chain computation management, CPFR, privacy, secure multi-party computation, secure supply, security, SMC},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf},
@@ -7584,7 +7571,7 @@ collaborative forecasting; (3) we demonstrate that our protocols are not only se
pages = {285--304},
publisher = {Springer Berlin Heidelberg},
organization = {Springer Berlin Heidelberg},
- abstract = {We show that if a set of players hold shares of a value a {\epsilon} Fp for some prime p (where the set of shares is written [a] p ), it is possible to compute, in constant rounds and with unconditional security, sharings of the bits of a, i.e., compute sharings [a0] p , ..., [al- 1] p such that l = ⌈ log2 p ⌉, a0,...,al--1 {\epsilon} {0,1} and a = summation of ai * 2^i where 0 <= i <= l- 1. Our protocol is secure against active adversaries and works for any linear secret sharing scheme with a multiplication protocol. The complexity of our protocol is O(llogl) invocations of the multiplication protocol for the underlying secret sharing scheme, carried out in O(1) rounds.
+ abstract = {We show that if a set of players hold shares of a value a ∈ F_p for some prime p (where the set of shares is written [a]_p), it is possible to compute, in constant rounds and with unconditional security, sharings of the bits of a, i.e., compute sharings [a_0]_p, ..., [a_{l-1}]_p such that l = ⌈log_2 p⌉, a_0, ..., a_{l-1} ∈ {0,1} and a = \sum_{i=0}^{l-1} a_i 2^i. Our protocol is secure against active adversaries and works for any linear secret sharing scheme with a multiplication protocol. The complexity of our protocol is O(l log l) invocations of the multiplication protocol for the underlying secret sharing scheme, carried out in O(1) rounds.
This result immediately implies solutions to other long-standing open problems such as constant-rounds and unconditionally secure protocols for deciding whether a shared number is zero, comparing shared numbers, raising a shared number to a shared exponent and reducing a shared number modulo a shared modulus},
isbn = {978-3-540-32731-8},
doi = {10.1007/11681878_15},
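
The arithmetic relation the protocol targets can be checked in the clear: l = ⌈log2 p⌉ bits a_0, ..., a_{l-1} with a equal to the sum of a_i * 2^i. The sketch below verifies only this relation; none of the secret-sharing machinery the paper actually constructs is shown.

# Clear-text check of the bit-decomposition relation from the abstract:
# l = ceil(log2 p) bits a_0..a_{l-1} with a = sum_i a_i * 2^i. The
# protocol computes *sharings* of these bits; no secret sharing here.
import math

def bit_decompose(a: int, p: int) -> list[int]:
    l = math.ceil(math.log2(p))
    return [(a >> i) & 1 for i in range(l)]

p, a = 101, 57
bits = bit_decompose(a, p)  # l = 7 bits for p = 101
assert a == sum(b << i for i, b in enumerate(bits))
print(bits)
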
@@ -7699,7 +7686,7 @@ This result immediately implies solutions to other long-standing open problems s
month = jan,
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Anonymous communication with onions requires that a user application determines the whole routing path of an onion. This scenario has certain disadvantages, it might be dangerous in some situations, and it does not fit well to the current layered architecture of dynamic communication networks.
+ abstract = {Anonymous communication with onions requires that a user application determines the whole routing path of an onion. This scenario has certain disadvantages: it might be dangerous in some situations, and it does not fit well with the current layered architecture of dynamic communication networks.
We show that applying encoding based on universal re-encryption can solve many of these problems by providing much flexibility -- the onions can be created on-the-fly or in advance by different parties},
keywords = {onion routing, universal re-encryption},
isbn = {978-3-540-24302-1},
@@ -7869,8 +7856,8 @@ We show that applying encoding based on universal re-encryption can solve many o
pages = {302--321},
publisher = {Springer},
organization = {Springer},
- abstract = {This paper presents efficient off-line anonymous e-cash schemes where a user can withdraw a wallet containing 2^l coins each of which she can spend unlinkably. Our first result is a scheme, secure under the strong RSA and the y-DDHI assumptions, where the complexity of the withdrawal and spend operations is O(l+k) and the user{\textquoteright}s wallet can be stored using O(l+k) bits, where k is a security parameter. The best previously known schemes require at least one of these complexities to be O(2^l k). In fact, compared to previous e-cash schemes, our whole wallet of 2^l coins has about the same size as one coin in these schemes. Our scheme also offers exculpability of users, that is, the bank can prove to third parties that a user has double-spent.
-
+ abstract = {This paper presents efficient off-line anonymous e-cash schemes where a user can withdraw a wallet containing 2^l coins, each of which she can spend unlinkably. Our first result is a scheme, secure under the strong RSA and the y-DDHI assumptions, where the complexity of the withdrawal and spend operations is O(l+k) and the user{\textquoteright}s wallet can be stored using O(l+k) bits, where k is a security parameter. The best previously known schemes require at least one of these complexities to be O(2^l k). In fact, compared to previous e-cash schemes, our whole wallet of 2^l coins has about the same size as one coin in these schemes. Our scheme also offers exculpability of users, that is, the bank can prove to third parties that a user has double-spent.
+
We then extend our scheme to our second result, the first e-cash scheme that provides traceable coins without a trusted third party. That is, once a user has double spent one of the 2^l coins in her wallet, all her spendings of these coins can be traced. We present two alternate constructions. One construction shares the same complexities with our first result but requires a strong bilinear map assumption that is only conjectured to hold on MNT curves. The second construction works on more general types of elliptic curves, but the price for this is that the complexity of the spending and of the withdrawal protocols becomes O(lk) and O(lk + k^2) bits, respectively, and wallets take O(lk) bits of storage. All our schemes are secure in the random oracle model},
isbn = {3-540-25910-4},
doi = {10.1007/b136415},
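
One way to see why a wallet of 2^l coins can fit in O(l+k) bits: the coins' serial numbers are derived pseudorandomly from a short wallet seed and the coin index, so only the seed and a counter need to be stored. The actual scheme uses an algebraic PRF so that spending can be proven correct in zero knowledge; the HMAC in this sketch is purely an illustrative stand-in.

# Toy illustration of a 2^l-coin wallet addressed by coin index: each
# serial number is a PRF of a wallet seed and the index. The real scheme
# uses an algebraic PRF so spending can be proven in zero knowledge;
# HMAC-SHA256 here is only a stand-in for that PRF.
import hmac, hashlib

def coin_serial(seed: bytes, index: int) -> str:
    return hmac.new(seed, index.to_bytes(8, "big"), hashlib.sha256).hexdigest()

seed, l = b"wallet-seed", 4
serials = [coin_serial(seed, i) for i in range(2**l)]  # 16 distinct serials
assert len(set(serials)) == 2**l
print(serials[0])
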
@@ -8006,8 +7993,8 @@ We then extend our scheme to our second result, the first e-cash scheme that pro
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {The Internet is composed of many independent autonomous systems (ASes) that exchange reachability information to destinations using the Border Gateway Protocol (BGP). Network operators in each AS configure BGP routers to control the routes that are learned, selected, and announced to other routers. Faults in BGP configuration can cause forwarding loops, packet loss, and unintended paths between hosts, each of which constitutes a failure of the Internet routing infrastructure.
-
+ abstract = {The Internet is composed of many independent autonomous systems (ASes) that exchange reachability information to destinations using the Border Gateway Protocol (BGP). Network operators in each AS configure BGP routers to control the routes that are learned, selected, and announced to other routers. Faults in BGP configuration can cause forwarding loops, packet loss, and unintended paths between hosts, each of which constitutes a failure of the Internet routing infrastructure.
+
This paper describes the design and implementation of rcc, the router configuration checker, a tool that finds faults in BGP configurations using static analysis. rcc detects faults by checking constraints that are based on a high-level correctness specification. rcc detects two broad classes of faults: route validity faults, where routers may learn routes that do not correspond to usable paths, and path visibility faults, where routers may fail to learn routes for paths that exist in the network. rcc enables network operators to test and debug configurations before deploying them in an operational network, improving on the status quo where most faults are detected only during operation. rcc has been downloaded by more than sixty-five network operators to date, some of whom have shared their configurations with us. We analyze network-wide configurations from 17 different ASes to detect a wide variety of faults and use these findings to motivate improvements to the Internet routing infrastructure},
keywords = {autonomous systems, border gateway protocol},
www_section = {http://portal.acm.org/citation.cfm?id=1251207$\#$},
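
A toy static check in the spirit of rcc, with an invented rule and input format (rcc's own constraint language and configuration parser are not reproduced here): flag BGP sessions that reference a route map which is never defined, a simple route-validity-style configuration fault.

# Toy static configuration check in the spirit of rcc (not its actual
# rule set or input format): report sessions whose route map is
# referenced but never defined.
def undefined_route_maps(config: dict) -> list[str]:
    defined = set(config.get("route_maps", []))
    faults = []
    for neighbor, rm in config.get("sessions", {}).items():
        if rm not in defined:
            faults.append(f"session {neighbor}: route-map {rm!r} undefined")
    return faults

config = {
    "route_maps": ["IN-FILTER"],
    "sessions": {"10.0.0.1": "IN-FILTER", "10.0.0.2": "OUT-FILTER"},
}
print(undefined_route_maps(config))
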
@@ -8204,7 +8191,7 @@ This paper describes the design and implementation of rcc, the router configurat
pages = {169--187},
publisher = {Springer-Verlag, LNCS 3621},
organization = {Springer-Verlag, LNCS 3621},
- abstract = {Anonymous channels are necessary for a multitude of privacy-protecting protocols. Onion routing is probably the best known way to achieve anonymity in practice. However, the cryptographic aspects of onion routing have not been sufficiently explored: no satisfactory definitions of security have been given, and existing constructions have only had ad-hoc security analysis for the most part.
+ abstract = {Anonymous channels are necessary for a multitude of privacy-protecting protocols. Onion routing is probably the best known way to achieve anonymity in practice. However, the cryptographic aspects of onion routing have not been sufficiently explored: no satisfactory definitions of security have been given, and existing constructions have only had ad-hoc security analysis for the most part.
We provide a formal definition of onion-routing in the universally composable framework, and also discover a simpler definition (similar to CCA2 security for encryption) that implies security in the UC framework. We then exhibit an efficient and easy-to-implement construction of an onion routing scheme satisfying this definition},
keywords = {onion routing, privacy},
isbn = {978-3-540-28114-6},
@@ -8240,8 +8227,8 @@ We provide a formal definition of onion-routing in the universally composable fr
publisher = {Springer},
organization = {Springer},
address = {Aarhus, Denmark },
- abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as set of descriptive attributes. A Fuzzy IBE scheme allows for a private key for an identity, ω, to decrypt a ciphertext encrypted with an identity, ω , if and only if the identities ω and ω are close to each other as measured by the {\textquotedblleft}set overlap{\textquotedblright} distance metric. A Fuzzy IBE scheme can be applied to enable encryption
-using biometric inputs as identities; the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for the use of biometric identities, which inherently will have some noise each time they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of application that we term {\textquotedblleft}attribute-based encryption{\textquotedblright}.
+ abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as a set of descriptive attributes. A Fuzzy IBE scheme allows for a private key for an identity, ω, to decrypt a ciphertext encrypted with an identity, ω′, if and only if the identities ω and ω′ are close to each other as measured by the {\textquotedblleft}set overlap{\textquotedblright} distance metric. A Fuzzy IBE scheme can be applied to enable encryption
+using biometric inputs as identities; the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for the use of biometric identities, which inherently will have some noise each time they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of application that we term {\textquotedblleft}attribute-based encryption{\textquotedblright}.
In this paper we present two constructions of Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based Encryption of a message under several attributes that compose a (fuzzy) identity. Our IBE schemes are both error-tolerant and secure against collusion attacks. Additionally, our basic construction does not use random oracles. We prove the security of our schemes under the Selective-ID security model},
keywords = {Fuzzy IBE, IBE},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/EUROCRYPT\%2705\%20-\%20Fuzzy\%20Identity-Based\%20Encryption.pdf},
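
The decryption condition of Fuzzy IBE, stripped of all cryptography: a key for identity ω decrypts a ciphertext for identity ω′ iff the set overlap |ω ∩ ω′| reaches an error-tolerance threshold d. A plain check of that policy; the pairing-based construction itself is not sketched.

# The Fuzzy IBE decryption condition without any cryptography: a key
# for attribute set key_attrs opens a ciphertext for ct_attrs iff the
# two sets overlap in at least d attributes.
def can_decrypt(key_attrs: set, ct_attrs: set, d: int) -> bool:
    return len(key_attrs & ct_attrs) >= d

iris_scan_1 = {"a1", "a3", "a4", "a7"}  # noisy biometric reading
iris_scan_2 = {"a1", "a3", "a5", "a7"}  # second reading of the same iris
print(can_decrypt(iris_scan_1, iris_scan_2, d=3))  # True: overlap is 3
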
@@ -8464,8 +8451,8 @@ In this paper we present two constructions of Fuzzy IBE schemes. Our constructio
month = {September},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {We consider anonymous communication protocols based on onions: each message is sent in an encrypted form through a path chosen at random by its sender, and the message is re-coded by each server on the path. Recently, it has been shown that if the anonymous paths are long enough, then the protocols provide provable security for some adversary models. However, it was assumed that all users choose intermediate servers uniformly at random from the same set of servers.
-We show that if a single user chooses only from a constrained subset of possible intermediate servers, anonymity level may dramatically decrease. A thumb rule is that if Alice is aware of much less than 50\% of possible intermediate servers, then the anonymity set for her message becomes surprisingly small with high probability. Moreover, for each location in the anonymity set an adversary may compute probability that it gets a message of Alice. Since there are big differences in these probabilities, in most cases the true destination of the message from Alice is in a small group of locations with the highest probabilities.
+ abstract = {We consider anonymous communication protocols based on onions: each message is sent in an encrypted form through a path chosen at random by its sender, and the message is re-coded by each server on the path. Recently, it has been shown that if the anonymous paths are long enough, then the protocols provide provable security for some adversary models. However, it was assumed that all users choose intermediate servers uniformly at random from the same set of servers.
+We show that if a single user chooses only from a constrained subset of possible intermediate servers, the anonymity level may decrease dramatically. A rule of thumb is that if Alice is aware of far fewer than 50\% of the possible intermediate servers, then the anonymity set for her message becomes surprisingly small with high probability. Moreover, for each location in the anonymity set an adversary may compute the probability that it receives a message from Alice. Since these probabilities differ substantially, in most cases the true destination of the message from Alice is in a small group of locations with the highest probabilities.
Our results contradict some beliefs that the protocols mentioned guarantee anonymity provided that the set of possible intermediate servers for each user is large},
keywords = {anonymity measurement, onion routing},
isbn = {978-3-540-28963-0},
@@ -8556,7 +8543,7 @@ Our results contradict some beliefs that the protocols mentioned guarantee anony
year = {2005},
type = {publication},
address = {Kaiserslautern, Germany},
- abstract = {Peer-to-peer overlay networks have grown significantly in size and sophistication over the last years. Meanwhile, distributed hash tables (DHT) provide efficient means to create global scale overlay networks on top of which various applications can be built. Although filesharing still is the most prominent example, other applications are well conceivable. In order to rationally design such applications, it is important to know (and understand) the properties of the overlay networks as seen from the respective application.
+ abstract = {Peer-to-peer overlay networks have grown significantly in size and sophistication over the last few years. Meanwhile, distributed hash tables (DHTs) provide efficient means to create global-scale overlay networks on top of which various applications can be built. Although file sharing is still the most prominent example, other applications are well conceivable. In order to rationally design such applications, it is important to know (and understand) the properties of the overlay networks as seen from the respective application.
This paper reports the results from a two-week measurement of the entire Overnet network, currently the most widely deployed DHT-based overlay. We describe both the design choices that made this measurement feasible and the results from the measurement itself. Besides the basic determination of network size, node availability and node distribution, we found unexpected results for the overlay latency distribution},
keywords = {distributed hash table, overlay networks, P2P},
isbn = {978-3-540-24473-8},
@@ -8648,8 +8635,8 @@ This paper reports the results from a two week measurement of the entire Overnet
title = {Obfuscated Ciphertext Mixing},
year = {2005},
month = {November},
- abstract = {Mixnets are a type of anonymous channel composed of a handful of trustees that, each in turn, shu$\#$e and rerandomize a batch ciphertexts. For applications that require verifiability, each trustee provides a proof of correct mixing. Though mixnets have recently been made quite e$\#$cient, they still require secret computation and proof generation after the mixing process.
-
+ abstract = {Mixnets are a type of anonymous channel composed of a handful of trustees that, each in turn, shuffle and rerandomize a batch of ciphertexts. For applications that require verifiability, each trustee provides a proof of correct mixing. Though mixnets have recently been made quite efficient, they still require secret computation and proof generation after the mixing process.
+
We introduce and implement Obfuscated Ciphertext Mixing, the obfuscation of a mixnet program. Using this technique, all proofs can be performed before the mixing process, even before the inputs are available. In addition, the mixing program does not need to be secret: anyone can publicly compute the shuffle (though not the decryption). We frame this functionality in the strongest obfuscation setting proposed by Barak et al. [4], tweaked for the public-key setting. For applications where the secrecy of the shuffle permutation is particularly important (e.g. voting), we also consider the Distributed Obfuscation of a Mixer, where multiple trustees cooperate to generate an obfuscated mixer program such that no single trustee knows the composed shuffle permutation},
keywords = {obfuscated ciphertext mixing},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.6592},
@@ -8729,8 +8716,8 @@ We introduce and implement Obfuscated Ciphertext Mixing, the obfuscation of a mi
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {Lisboa, Portugal},
- abstract = {We introduce Pastis, a completely decentralized multi-user read-write peer-to-peer file system. In Pastis every file is described by a modifiable inode-like structure which contains the addresses of the immutable blocks in which the file contents are stored. All data are stored using the Past distributed hash table (DHT), which we have modified in order to reduce the number of network messages it generates, thus optimizing replica retrieval.
-Pastis{\textquoteright} design is simple compared to other existing systems, as it does not require complex algorithms like Byzantine-fault tolerant (BFT) replication or a central administrative authority. It is also highly scalable in terms of the number of network nodes and users sharing a given file or portion of the file system. Furthermore, Pastis takes advantage of the fault tolerance and good locality properties of its underlying storage layer, the Past DHT.
+ abstract = {We introduce Pastis, a completely decentralized multi-user read-write peer-to-peer file system. In Pastis every file is described by a modifiable inode-like structure which contains the addresses of the immutable blocks in which the file contents are stored. All data are stored using the Past distributed hash table (DHT), which we have modified in order to reduce the number of network messages it generates, thus optimizing replica retrieval.
+Pastis{\textquoteright} design is simple compared to other existing systems, as it does not require complex algorithms like Byzantine-fault tolerant (BFT) replication or a central administrative authority. It is also highly scalable in terms of the number of network nodes and users sharing a given file or portion of the file system. Furthermore, Pastis takes advantage of the fault tolerance and good locality properties of its underlying storage layer, the Past DHT.
We have developed a prototype based on the FreePastry open-source implementation of the Past DHT. We have used this prototype to evaluate several characteristics of our file system design. Supporting the close-to-open consistency model, plus a variant of the read-your-writes model, our prototype shows that Pastis is between 1.4 and 1.8 times slower than NFS. In comparison, Ivy and Oceanstore are between two and three times slower than NFS},
keywords = {distributed hash table, multi-user, Pastis, peer-to-peer file system, read-write},
doi = {10.1007/11549468_128},
@@ -9039,7 +9026,7 @@ We have developed a prototype based on the FreePastry open-source implementation
publisher = {Springer},
organization = {Springer},
chapter = {8},
- abstract = {Several different approaches to realizing the basic principles of DHTs have emerged over the last few years. Although they rely on the same fundamental idea, there is a large diversity of methods for both organizing the identifier space and performing routing. The particular properties of each approach can thus be exploited by specific application scenarios and requirements.
+ abstract = {Several different approaches to realizing the basic principles of DHTs have emerged over the last few years. Although they rely on the same fundamental idea, there is a large diversity of methods for both organizing the identifier space and performing routing. The particular properties of each approach can thus be exploited by specific application scenarios and requirements.
This overview focuses on the three DHT systems that have received the most attention in the research community: Chord, Pastry, and Content Addressable Networks (CAN). Furthermore, the systems Symphony, Viceroy, and Kademlia are discussed because they exhibit interesting mechanisms and properties beyond those of the first three systems},
keywords = {CAN, Chord, Content Addressable Networks, dblp, distributed hash table, Kademlia, Pastry, Symphony, Viceroy},
isbn = {3-540-29192-X},
@@ -9086,7 +9073,7 @@ This overview focuses on the three DHT systems that have received the most atten
organization = {Springer Berlin / Heidelberg},
type = {publication},
address = {Waterloo, Canada},
- abstract = {Most routing protocols employ address aggregation to achieve scalability with respect to routing table size. But often, as networks grow in size and complexity, address aggregation fails. Other networks, e.g. sensor-actuator networks or ad-hoc networks, that are characterized by organic growth might not at all follow the classical hierarchical structures that are required for aggregation.
+ abstract = {Most routing protocols employ address aggregation to achieve scalability with respect to routing table size. But often, as networks grow in size and complexity, address aggregation fails. Other networks, e.g. sensor-actuator networks or ad-hoc networks, that are characterized by organic growth might not at all follow the classical hierarchical structures that are required for aggregation.
In this paper, we present a fully self-organizing routing scheme that is able to efficiently route messages in random networks with randomly assigned node addresses. The protocol combines peer-to-peer techniques with source routing and can be implemented to work with very limited resource demands. With the help of simulations we show that it nevertheless quickly converges into a globally consistent state and achieves a routing stretch of only 1.2 -- 1.3 in a network with more than 10^5 randomly assigned nodes},
keywords = {ad-hoc networks, P2P, self-organization},
isbn = {978-3-540-25809-4},
@@ -9251,7 +9238,7 @@ In this paper, we present a fully self-organizing routing scheme that is able to
year = {2005},
month = {July},
publisher = {University of Cambridge Computer Laboratory},
- abstract = {This is a short talk on topology of covert conflict, comprising joint work I{\textquoteright}ve been doing with Ross Anderson. The background of this work is the following. We consider a conflict, and there are parties to the conflict. There is communication going on that can be abstracted as a network of nodes (parties) and links (social ties between the nodes). We contend that once you{\textquoteright}ve got a conflict and you{\textquoteright}ve got enough parties to it, these guys start communicating as a result of the conflict. They form connections, that influences the conflict, and the dynamics of the conflict in turn feeds the connectivity of the unfolding network.
+ abstract = {This is a short talk on topology of covert conflict, comprising joint work I{\textquoteright}ve been doing with Ross Anderson. The background of this work is the following. We consider a conflict, and there are parties to the conflict. There is communication going on that can be abstracted as a network of nodes (parties) and links (social ties between the nodes). We contend that once you{\textquoteright}ve got a conflict and you{\textquoteright}ve got enough parties to it, these guys start communicating as a result of the conflict. They form connections; that influences the conflict, and the dynamics of the conflict in turn feed the connectivity of the unfolding network.
Modern conflicts often turn on connectivity: consider, for instance, anything from the American army{\textquoteright}s attack on the Taleban in Afghanistan and elsewhere, to medics who are trying to battle a disease like Aids, or anything else. All of these turn on making strategic decisions about which nodes to go after in the network. For instance, you could consider that a good first place to give condoms out and start any Aids programme would be with prostitutes},
doi = {10.1007/978-3-540-77156-2},
www_section = {http://www.springerlink.com/content/p885q38262486876/},
@@ -9267,7 +9254,7 @@ Modern conflicts often turn on connectivity: consider, for instance, anything fr
organization = {Springer Berlin / Heidelberg},
type = {publication},
address = {Innsbruck, Austria},
- abstract = {With an ever-growing number of computers being embedded into our surroundings, the era of ubiquitous computing is approaching fast. However, as the number of networked devices increases, so does system complexity. Contrary to the goal of achieving an invisible computer, the required amount of management and human intervention increases more and more, both slowing down the growth rate and limiting the achievable size of ubiquitous systems.
+ abstract = {With an ever-growing number of computers being embedded into our surroundings, the era of ubiquitous computing is approaching fast. However, as the number of networked devices increases, so does system complexity. Contrary to the goal of achieving an invisible computer, the required amount of management and human intervention increases more and more, both slowing down the growth rate and limiting the achievable size of ubiquitous systems.
In this paper we present a novel routing approach that is capable of handling complex networks without any administrative intervention. Based on a combination of standard overlay routing techniques and source routes, this approach is capable of efficiently bootstrapping a routable network. Unlike other approaches that try to combine peer-to-peer ideas with ad-hoc networks, sensor networks, or ubiquitous systems, our approach is not based on a routing scheme. This makes the resulting system flexible and powerful with respect to application support as well as efficient with regard to routing overhead and system complexity},
keywords = {autonomous systems, overlay networks, P2P},
isbn = {978-3-540-25273-3},
@@ -9371,7 +9358,7 @@ In this paper we present a novel routing approach that is capable of handling co
pages = {1--16},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Traditional methods for evaluating the amount of anonymity afforded by various Mix configurations have depended on either measuring the size of the set of possible senders of a particular message (the anonymity set size), or by measuring the entropy associated with the probability distribution of the messages possible senders. This paper explores further an alternative way of assessing the anonymity of a Mix system by considering the capacity of a covert channel from a sender behind the Mix to an observer of the Mix{\textquoteright}s output.
+ abstract = {Traditional methods for evaluating the amount of anonymity afforded by various Mix configurations have depended on either measuring the size of the set of possible senders of a particular message (the anonymity set size), or measuring the entropy associated with the probability distribution of the message{\textquoteright}s possible senders. This paper explores further an alternative way of assessing the anonymity of a Mix system by considering the capacity of a covert channel from a sender behind the Mix to an observer of the Mix{\textquoteright}s output.
Initial work considered a simple model, with an observer (Eve) restricted to counting the number of messages leaving a Mix configured as a firewall guarding an enclave with one malicious sender (Alice) and some other naive senders (Clueless_i{\textquoteright}s). Here, we consider the case where Eve can distinguish between multiple destinations, and the senders can select to which destination their message (if any) is sent each clock tick},
isbn = {978-3-540-26203-9},
doi = {10.1007/b136164},
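
The two baseline anonymity measures this abstract contrasts, computed for a toy sender distribution; the covert-channel capacity analysis developed in the paper goes beyond this sketch.

# The two classical anonymity measures the abstract contrasts: the size
# of the sender anonymity set, and the entropy of the probability
# distribution over possible senders.
import math

def anonymity_set_size(probs):
    return sum(1 for p in probs if p > 0)

def sender_entropy_bits(probs):
    return -sum(p * math.log2(p) for p in probs if p > 0)

probs = [0.7, 0.1, 0.1, 0.1]                 # Alice is a far likelier sender
print(anonymity_set_size(probs))              # 4 possible senders
print(round(sender_entropy_bits(probs), 2))   # ~1.36 bits, well below log2(4) = 2
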
@@ -9408,7 +9395,7 @@ Initial work considered a simple model, with an observer (Eve) restricted to cou
month = {August},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Encapsulating messages in onions is one of the major techniques providing anonymous communication in computer networks. To some extent, it provides security against traffic analysis by a passive adversary. However, it can be highly vulnerable to attacks by an active adversary. For instance, the adversary may perform a simple so--called repetitive attack: a malicious server sends the same massage twice, then the adversary traces places where the same message appears twice -- revealing the route of the original message. A repetitive attack was examined for mix--networks. However, none of the countermeasures designed is suitable for onion--routing.
+ abstract = {Encapsulating messages in onions is one of the major techniques providing anonymous communication in computer networks. To some extent, it provides security against traffic analysis by a passive adversary. However, it can be highly vulnerable to attacks by an active adversary. For instance, the adversary may perform a simple so-called repetitive attack: a malicious server sends the same message twice, then the adversary traces places where the same message appears twice -- revealing the route of the original message. A repetitive attack was examined for mix networks. However, none of the countermeasures designed is suitable for onion routing.
In this paper we propose an {\textquotedblleft}onion-like{\textquotedblright} encoding design based on universal reencryption. The onions constructed in this way can be used in a protocol that achieves the same goals as the classical onions, however, at the same time we achieve immunity against a repetitive attack. Even if an adversary disturbs communication and prevents processing a message somewhere on the onion path, it is easy to identify the malicious server performing the attack and provide an evidence of its illegal behavior},
keywords = {onion routing, repetitive attack, universal re-encryption, unlinkability},
isbn = {978-3-540-24302-1},
@@ -9520,7 +9507,7 @@ In this paper we propose an {\textquotedblleft}onion-like{\textquotedblright} en
year = {2004},
month = {September},
address = {France},
- abstract = {We evaluate the anonymity provided by two popular email mix implementations, Mixmaster and Reliable, and compare their effectiveness through the use of simulations which model the algorithms used by these mixing applications. Our simulations are based on actual traffic data obtained from a public anonymous remailer (mix node). We determine that assumptions made in previous literature about the distribution of mix input traffic are incorrect: in particular, the input traffic does not follow a Poisson distribution. We establish for the first time that a lower bound exists on the anonymity of Mixmaster, and discover that under certain circumstances the algorithm used by Reliable provides no anonymity. We find that the upper bound on anonymity provided by Mixmaster is slightly higher than that provided by Reliable.
+ abstract = {We evaluate the anonymity provided by two popular email mix implementations, Mixmaster and Reliable, and compare their effectiveness through the use of simulations which model the algorithms used by these mixing applications. Our simulations are based on actual traffic data obtained from a public anonymous remailer (mix node). We determine that assumptions made in previous literature about the distribution of mix input traffic are incorrect: in particular, the input traffic does not follow a Poisson distribution. We establish for the first time that a lower bound exists on the anonymity of Mixmaster, and discover that under certain circumstances the algorithm used by Reliable provides no anonymity. We find that the upper bound on anonymity provided by Mixmaster is slightly higher than that provided by Reliable.
We identify flaws in the software in Reliable that further compromise its ability to provide anonymity, and review key areas that are necessary for the security of a mix in addition to a sound algorithm. Our analysis can be used to evaluate under which circumstances the two mixing algorithms should be used to best achieve anonymity and satisfy their purpose. Our work can also be used as a framework for establishing a security review process for mix node deployments},
isbn = {978-3-540-22987-2},
doi = {10.1007/b100085},
@@ -9633,10 +9620,10 @@ We identify flaws in the software in Reliable that further compromise its abilit
publisher = {USENIX Association},
organization = {USENIX Association},
address = {San Francisco, CA, USA},
- abstract = {Designing a wide-area distributed hash table (DHT) that provides high-throughput and low-latency network storage is a challenge. Existing systems have explored a range of solutions, including iterative routing, recursive routing, proximity routing and neighbor selection, erasure coding, replication, and server selection.
-
-This paper explores the design of these techniques and their interaction in a complete system, drawing on the measured performance of a new DHT implementation and results from a simulator with an accurate Internet latency model. New techniques that resulted from this exploration include use of latency predictions based on synthetic co-ordinates, efficient integration of lookup routing and data fetching, and a congestion control mechanism suitable for fetching data striped over large numbers of servers.
-
+ abstract = {Designing a wide-area distributed hash table (DHT) that provides high-throughput and low-latency network storage is a challenge. Existing systems have explored a range of solutions, including iterative routing, recursive routing, proximity routing and neighbor selection, erasure coding, replication, and server selection.
+
+This paper explores the design of these techniques and their interaction in a complete system, drawing on the measured performance of a new DHT implementation and results from a simulator with an accurate Internet latency model. New techniques that resulted from this exploration include use of latency predictions based on synthetic co-ordinates, efficient integration of lookup routing and data fetching, and a congestion control mechanism suitable for fetching data striped over large numbers of servers.
+
Measurements with 425 server instances running on 150 PlanetLab and RON hosts show that the latency optimizations reduce the time required to locate and fetch data by a factor of two. The throughput optimizations result in a sustainable bulk read throughput related to the number of DHT hosts times the capacity of the slowest access link; with 150 selected PlanetLab hosts, the peak aggregate throughput over multiple clients is 12.8 megabytes per second},
keywords = {distributed hash table, high-throughput, latency},
www_section = {http://dl.acm.org/citation.cfm?id=1251175.1251182},
@@ -9674,8 +9661,8 @@ Measurements with 425 server instances running on 150 PlanetLab and RON hosts sh
month = {May},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Dining cryptographers networks (or DC-nets) are a privacy-preserving primitive devised by Chaum for anonymous message publication. A very attractive feature of the basic DC-net is its non-interactivity. Subsequent to key establishment, players may publish their messages in a single broadcast round, with no player-to-player communication. This feature is not possible in other privacy-preserving tools like mixnets. A drawback to DC-nets, however, is that malicious players can easily jam them, i.e., corrupt or block the transmission of messages from honest parties, and may do so without being traced.
-Several researchers have proposed valuable methods of detecting cheating players in DC-nets. This is usually at the cost, however, of multiple broadcast rounds, even in the optimistic case, and often of high computational and/or communications overhead, particularly for fault recovery.
+ abstract = {Dining cryptographers networks (or DC-nets) are a privacy-preserving primitive devised by Chaum for anonymous message publication. A very attractive feature of the basic DC-net is its non-interactivity. Subsequent to key establishment, players may publish their messages in a single broadcast round, with no player-to-player communication. This feature is not possible in other privacy-preserving tools like mixnets. A drawback to DC-nets, however, is that malicious players can easily jam them, i.e., corrupt or block the transmission of messages from honest parties, and may do so without being traced.
+Several researchers have proposed valuable methods of detecting cheating players in DC-nets. This is usually at the cost, however, of multiple broadcast rounds, even in the optimistic case, and often of high computational and/or communications overhead, particularly for fault recovery.
We present new DC-net constructions that simultaneously achieve non-interactivity and high-probability detection and identification of cheating players. Our proposals are quite efficient, imposing a basic cost that is linear in the number of participating players. Moreover, even in the case of cheating in our proposed system, just one additional broadcast round suffices for full fault recovery. Among other tools, our constructions employ bilinear maps, a recently popular cryptographic technique for reducing communication complexity},
keywords = {anonymity, dining cryptographers, non-interactive, privacy},
isbn = {978-3-540-21935-4},
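
The unhardened one-round XOR DC-net the paper starts from, in a few lines: every pair of players shares a key, each player broadcasts the XOR of its pairwise keys (XORed with its message if it is the sender), and the XOR of all broadcasts cancels every key. The paper's actual contribution, non-interactive detection of cheaters using bilinear maps, is not sketched.

# Basic one-round XOR DC-net: each pairwise key appears in exactly two
# broadcasts, so XORing all broadcasts cancels every key and reveals
# the single sender's message. No cheater detection is attempted here.
import secrets

n, msg = 4, 0x42
keys = {(i, j): secrets.randbits(8) for i in range(n) for j in range(i + 1, n)}

def pad(i):
    x = 0
    for (a, b), k in keys.items():
        if i in (a, b):
            x ^= k
    return x

broadcasts = [pad(i) ^ (msg if i == 2 else 0) for i in range(n)]  # player 2 sends
result = 0
for b in broadcasts:
    result ^= b
assert result == msg  # all pads cancel pairwise
print(hex(result))
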
@@ -9721,7 +9708,7 @@ We present new DC-net constructions that simultaneously achieve non-interactivit
month = {September},
publisher = {Springer Boston},
organization = {Springer Boston},
- abstract = {A serious weakness of the onion protocol, one of the major tools for anonymous communication, is its vulnerability to network failures and/or an adversary trying to break the communication. This is facilitated by the fact that each message is sent through a path of a certain length and a failure in a single point of this path prohibits message delivery. Since the path cannot be too short in order to offer anonymity protection (at least logarithmic in the number of nodes), the failure probability might be quite substantial.
+ abstract = {A serious weakness of the onion protocol, one of the major tools for anonymous communication, is its vulnerability to network failures and/or an adversary trying to break the communication. This is facilitated by the fact that each message is sent through a path of a certain length and a failure in a single point of this path prohibits message delivery. Since the path cannot be too short in order to offer anonymity protection (at least logarithmic in the number of nodes), the failure probability might be quite substantial.
The simplest solution to this problem would be to send many onions with the same message. We show that this approach can be optimized with respect to communication overhead and resilience to failures and/or adversary attacks. We propose two protocols: the first one mimics K independent onions with a single onion. The second protocol is designed for the case where an adaptive adversary may destroy communication going out of servers chosen according to the traffic observed by him. In this case a single message flows in a stream of K onions {\textemdash} the main point is that even when the adversary kills some of these onions, the stream quickly recovers to the original bandwidth {\textemdash} again K onions with this message would flow through the network},
keywords = {adaptive adversary, anonymity, onion routing},
isbn = {978-0-387-24485-3},
@@ -9733,7 +9720,7 @@ The simplest solution to this problem would be to send many onions with the same
title = {The Economics of Censorship Resistance},
booktitle = {In The Third Annual Workshop on Economics and Information Security (WEIS04},
year = {2004},
- abstract = {We propose the first economic model of censorship resistance. Early peer-to-peer systems, such as the Eternity Service, sought to achieve censorshop resistance by distributing content randomly over the whole Internet. An alternative approach is to encourage nodes to serve resources they are interested in. Both architectures have been implemented but so far there has been no quantitative analysis of the protection they provide. We develop a model inspired by economics and con
+ abstract = {We propose the first economic model of censorship resistance. Early peer-to-peer systems, such as the Eternity Service, sought to achieve censorship resistance by distributing content randomly over the whole Internet. An alternative approach is to encourage nodes to serve resources they are interested in. Both architectures have been implemented but so far there has been no quantitative analysis of the protection they provide. We develop a model inspired by economics and conflict
theory to analyse these systems. Under our assumptions, resource distribution according to nodes{\textquoteright} individual preferences provides better stability and resistance to censorship. Our results may have wider application too},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.7003\&rep=rep1\&type=pdf},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.4.7003\%20\%281\%29.pdf},
@@ -9869,7 +9856,7 @@ ict theory to analyse these systems. Under our assumptions, resource distributio
pages = {207--225},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {In this paper, we address issues related to flow correlation attacks and the corresponding countermeasures in mix networks. Mixes have been used in many anonymous communication systems and are supposed to provide countermeasures that can defeat various traffic analysis attacks. In this paper, we focus on a particular class of traffic analysis attack, flow correlation attacks, by which an adversary attempts to analyze the network traffic and correlate the traffic of a flow over an input link at a mix with that over an output link of the same mix. Two classes of correlation methods are considered, namely time-domain methods and frequency-domain methods. Based on our threat model and known strategies in existing mix networks, we perform extensive experiments to analyze the performance of mixes. We find that a mix with any known batching strategy may fail against flow correlation attacks in the sense that for a given flow over an input link, the adversary can correctly determine which output link is used by the same flow. We also investigated methods that can effectively counter the flow correlation attack and other timing attacks. The empirical results provided in this paper give an indication to designers of Mix networks about appropriate configurations and alternative mechanisms to be used to counter flow correlation attacks.
+ abstract = {In this paper, we address issues related to flow correlation attacks and the corresponding countermeasures in mix networks. Mixes have been used in many anonymous communication systems and are supposed to provide countermeasures that can defeat various traffic analysis attacks. In this paper, we focus on a particular class of traffic analysis attack, flow correlation attacks, by which an adversary attempts to analyze the network traffic and correlate the traffic of a flow over an input link at a mix with that over an output link of the same mix. Two classes of correlation methods are considered, namely time-domain methods and frequency-domain methods. Based on our threat model and known strategies in existing mix networks, we perform extensive experiments to analyze the performance of mixes. We find that a mix with any known batching strategy may fail against flow correlation attacks in the sense that for a given flow over an input link, the adversary can correctly determine which output link is used by the same flow. We also investigated methods that can effectively counter the flow correlation attack and other timing attacks. The empirical results provided in this paper give an indication to designers of Mix networks about appropriate configurations and alternative mechanisms to be used to counter flow correlation attacks.
This work was supported in part by the National Science Foundation under Contracts 0081761 and 0324988, by the Defense Advanced Research Projects Agency under Contract F30602-99-1-0531, and by Texas A\&M University under its Telecommunication and Information Task Force Program. Any opinions, findings, and conclusions or recommendations in this material, either expressed or implied, are those of the authors and do not necessarily reflect the views of the sponsors listed above},
keywords = {flow correlation attack},
isbn = {978-3-540-26203-9},
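
A time-domain method of the kind the paper evaluates, on made-up per-window packet counts: the adversary picks the output link whose traffic pattern correlates best with the observed input flow. The statistics.correlation helper requires Python 3.10 or later.

# Time-domain flow correlation: count packets per time window on the
# input flow and on each candidate output link, then pick the output
# whose counts correlate best with the input. Data is invented.
from statistics import correlation  # Python 3.10+

inflow = [12, 3, 9, 14, 2, 8]        # packets per window on the input link
outputs = {
    "out-A": [11, 4, 9, 13, 3, 7],   # same flow, slightly perturbed by the mix
    "out-B": [5, 6, 5, 6, 5, 6],     # unrelated cover traffic
}
best = max(outputs, key=lambda o: correlation(inflow, outputs[o]))
print(best)  # out-A
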
@@ -9949,8 +9936,8 @@ This work was supported in part by the National Science Foundation under Contrac
year = {2004},
month = {May},
address = {Toronto},
- abstract = {A passive attacker can compromise a generic anonymity protocol by applying the so called disclosure attack, i.e. a special traffic analysis attack. In this work we present a more efficient way to accomplish this goal, i.e. we need less observations by looking for unique minimal hitting sets. We call this the hitting set attack or just HS-attack.
-In general, solving the minimal hitting set problem is NP-hard. Therefore, we use frequency analysis to enhance the applicability of our attack. It is possible to apply highly efficient backtracking search algorithms. We call this approach the statistical hitting set attack or SHS-attack.
+ abstract = {A passive attacker can compromise a generic anonymity protocol by applying the so-called disclosure attack, i.e. a special traffic analysis attack. In this work we present a more efficient way to accomplish this goal, i.e. we need fewer observations by looking for unique minimal hitting sets. We call this the hitting set attack or just HS-attack.
+In general, solving the minimal hitting set problem is NP-hard. Therefore, we use frequency analysis to enhance the applicability of our attack. It is possible to apply highly efficient backtracking search algorithms. We call this approach the statistical hitting set attack or SHS-attack.
However, the statistical hitting set attack is prone to wrong solutions with a given small probability. We use here duality checking algorithms to resolve this problem. We call this final exact attack the HS*-attack},
keywords = {anonymity, hitting set attack, traffic analysis},
doi = {10.1007/b104759},
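
The exact hitting-set idea in brute force: each observation is the recipient set of a batch that contained Alice's message, so the set of her true communication partners must intersect every observation. The paper's contribution is making this search practical (frequency analysis, duality checking); this sketch merely enumerates minimum-size hitting sets.

# Brute-force enumeration of minimum-size hitting sets: Alice's partner
# set must "hit" (intersect) every observed recipient set. The paper's
# efficient search strategies are not reproduced here.
from itertools import combinations

def minimal_hitting_sets(observations, universe):
    for size in range(1, len(universe) + 1):
        hits = [set(c) for c in combinations(universe, size)
                if all(set(c) & obs for obs in observations)]
        if hits:
            return hits  # all hitting sets of minimum size
    return []

obs = [{"bob", "carol"}, {"bob", "dave"}, {"carol", "dave", "bob"}]
print(minimal_hitting_sets(obs, universe={"bob", "carol", "dave"}))
# [{'bob'}]: with a unique minimal hitting set, Bob is identified
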
@@ -9984,7 +9971,7 @@ However, the statistical hitting set attack is prone to wrong solutions with a g
pages = {79--87},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Golle et al recently introduced universal re-encryption, defining it as re-encryption by a player who does not know the key used for the original encryption, but which still allows an intended player to recover the plaintext. Universal re-encryption is potentially useful as part of many information-hiding techniques, as it allows any player to make ciphertext unidentifiable without knowing the key used.
+ abstract = {Golle et al. recently introduced universal re-encryption, defining it as re-encryption by a player who does not know the key used for the original encryption, but which still allows an intended player to recover the plaintext. Universal re-encryption is potentially useful as part of many information-hiding techniques, as it allows any player to make ciphertext unidentifiable without knowing the key used.
Golle et al.{\textquoteright}s techniques for universal re-encryption are reviewed, and a hybrid universal re-encryption construction with improved work and space requirements, which also permits indefinite re-encryptions, is presented. Some implementational issues and optimisations are discussed},
keywords = {information hiding, re-encryption},
isbn = {978-3-540-26203-9},
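
For concreteness, Golle et al.'s ElGamal-based universal re-encryption over a deliberately tiny group (illustrative parameters, not secure ones): a ciphertext carries a second ElGamal pair encrypting the identity element, which is what lets anyone re-randomize it without knowing the public key.

# Golle et al.'s ElGamal-based universal re-encryption over a toy group.
# A ciphertext is two ElGamal pairs; the second encrypts the identity,
# so re-randomization needs no key material at all.
import secrets

p = 2039           # safe prime, p = 2q + 1
q = 1019           # order of the quadratic-residue subgroup
g = pow(2, 2, p)   # generator of the order-q subgroup
x = secrets.randbelow(q - 1) + 1
y = pow(g, x, p)   # public key

def encrypt(m, y):
    k0, k1 = (secrets.randbelow(q - 1) + 1 for _ in range(2))
    return [(m * pow(y, k0, p) % p, pow(g, k0, p)),
            (pow(y, k1, p), pow(g, k1, p))]

def reencrypt(ct):  # uses no public or private key
    (a0, b0), (a1, b1) = ct
    r0, r1 = (secrets.randbelow(q - 1) + 1 for _ in range(2))
    return [(a0 * pow(a1, r0, p) % p, b0 * pow(b1, r0, p) % p),
            (pow(a1, r1, p), pow(b1, r1, p))]

def decrypt(ct, x):
    (a0, b0), (a1, b1) = ct
    assert a1 == pow(b1, x, p), "not encrypted under this key"
    return a0 * pow(b0, p - 1 - x, p) % p  # b0^(p-1-x) = b0^(-x) mod p

m = pow(g, 7, p)   # messages live in the subgroup
ct = reencrypt(encrypt(m, y))
assert decrypt(ct, x) == m
print("recovered:", decrypt(ct, x) == m)
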
@@ -10000,8 +9987,8 @@ Golle et al{\textquoteright}s techniques for universal re-encryption are reviewe
number = {1},
year = {2004},
pages = {3--36},
- abstract = {We propose a new specification framework for information hiding properties such as anonymity and privacy. The framework is based on the concept of a function view, which is a concise representation of the attacker{\textquoteright}s partial knowledge about a function. We describe system behavior as a set of functions, and formalize different information hiding properties in terms of views of these functions. We present an extensive case study, in which we use the function view framework to systematically classify and rigorously define a rich domain of identity-related properties, and to demonstrate that privacy and anonymity are independent.
-
+ abstract = {We propose a new specification framework for information hiding properties such as anonymity and privacy. The framework is based on the concept of a function view, which is a concise representation of the attacker{\textquoteright}s partial knowledge about a function. We describe system behavior as a set of functions, and formalize different information hiding properties in terms of views of these functions. We present an extensive case study, in which we use the function view framework to systematically classify and rigorously define a rich domain of identity-related properties, and to demonstrate that privacy and anonymity are independent.
+
The key feature of our approach is its modularity. It yields precise, formal specifications of information hiding properties for any protocol formalism and any choice of the attacker model as long as the latter induce an observational equivalence relation on protocol instances. In particular, specifications based on function views are suitable for any cryptographic process calculus that defines some form of indistinguishability between processes. Our definitions of information hiding properties take into account any feature of the security model, including probabilities, random number generation, timing, etc., to the extent that it is accounted for by the formalism in which the system is specified},
keywords = {anonymity, information hiding, privacy},
issn = {0926-227X},
@@ -10063,13 +10050,13 @@ The key feature of our approach is its modularity. It yields precise, formal spe
school = {KTH/Royal Institute of Technology},
type = {Master{\textquoteright}s Thesis},
address = {Stockholm},
- abstract = {In this thesis we present the design of Keso, a distributed and completely decentralized file system based on the peer-to-peer overlay network DKS. While designing Keso we have taken into account many of the problems that exist in today{\textquoteright}s distributed file systems.
-Traditionally, distributed file systems have been built around dedicated file servers which often use expensive hardware to minimize the risk of breakdown and to handle the load. System administrators are required to monitor the load and disk usage of the file servers and to manually add clients and servers to the system.
-
-Another drawback with centralized file systems are that a lot of storage space is unused on clients. Measurements we have taken on existing computer systems has shown that a large part of the storage capacity of workstations is unused. In the system we looked at there was three times as much storage space available on workstations than was stored in the distributed file system. We have also shown that much data stored in a production use distributed file system is redundant.
-
-The main goals for the design of Keso have been that it should make use of spare resources, avoid storing unnecessarily redundant data, scale well, be self-organizing and be a secure file system suitable for a real-world environment.
-
+ abstract = {In this thesis we present the design of Keso, a distributed and completely decentralized file system based on the peer-to-peer overlay network DKS. While designing Keso we have taken into account many of the problems that exist in today{\textquoteright}s distributed file systems.
+Traditionally, distributed file systems have been built around dedicated file servers which often use expensive hardware to minimize the risk of breakdown and to handle the load. System administrators are required to monitor the load and disk usage of the file servers and to manually add clients and servers to the system.
+
+Another drawback of centralized file systems is that a lot of storage space is unused on clients. Measurements we have taken on existing computer systems have shown that a large part of the storage capacity of workstations is unused. In the system we looked at, there was three times as much storage space available on workstations as was stored in the distributed file system. We have also shown that much data stored in a production-use distributed file system is redundant.
+
+The main goals for the design of Keso have been that it should make use of spare resources, avoid storing unnecessarily redundant data, scale well, be self-organizing and be a secure file system suitable for a real-world environment.
+
By basing Keso on peer-to-peer techniques it becomes highly scalable, fault tolerant and self-organizing. Keso is intended to run on ordinary workstations and can make use of the previously unused storage space. Keso also provides means for access control and data privacy despite being built on top of untrusted components. The file system utilizes the fact that a lot of data stored in traditional file systems is redundant by letting all files that contain a datablock with the same contents reference the same datablock in the file system. This is achieved while still maintaining access control and data privacy},
keywords = {decentralized file system, DKS, Keso},
www_section = {http://mattias.amnefe.lt/keso/},
@@ -10092,8 +10079,8 @@ By basing Keso on peer-to-peer techniques it becomes highly scalable, fault tole
year = {2004},
month = {October},
address = {Washington, DC, USA},
- abstract = {Anonymity networks have long relied on diversity of node location for protection against attacks---typically an adversary who can observe a larger fraction of the network can launch a more effective attack. We investigate the diversity of two deployed anonymity networks, Mixmaster and Tor, with respect to an adversary who controls a single Internet administrative domain.
-
+ abstract = {Anonymity networks have long relied on diversity of node location for protection against attacks---typically an adversary who can observe a larger fraction of the network can launch a more effective attack. We investigate the diversity of two deployed anonymity networks, Mixmaster and Tor, with respect to an adversary who controls a single Internet administrative domain.
+
Specifically, we implement a variant of a recently proposed technique that passively estimates the set of administrative domains (also known as autonomous systems, or ASes) between two arbitrary end-hosts without having access to either end of the path. Using this technique, we analyze the AS-level paths that are likely to be used in these anonymity networks. We find several cases in each network where multiple nodes are in the same administrative domain. Further, many paths between nodes, and between nodes and popular endpoints, traverse the same domain},
keywords = {anonymity, autonomous systems},
doi = {10.1145/1029179.1029199},
@@ -10268,8 +10255,8 @@ Specifically, we implement a variant of a recently proposed technique that passi
publisher = {ACM Press},
organization = {ACM Press},
address = {Washington DC, USA},
- abstract = {Efforts to design faster synchronous mix networks have focused on reducing the computational cost of mixing per server. We propose a different approach: our re-encryption mixnet allows servers to mix inputs in parallel. The result is a dramatic reduction in overall mixing time for moderate-to-large numbers of servers. As measured in the model we describe, for $n$ inputs and $M$ servers our parallel re-encryption mixnet produces output in time at most $2n$ -- and only around $n$ assuming a majority of honest servers. In contrast, a traditional, sequential, synchronous re-encryption mixnet requires time $Mn$.
-
+ abstract = {Efforts to design faster synchronous mix networks have focused on reducing the computational cost of mixing per server. We propose a different approach: our re-encryption mixnet allows servers to mix inputs in parallel. The result is a dramatic reduction in overall mixing time for moderate-to-large numbers of servers. As measured in the model we describe, for $n$ inputs and $M$ servers our parallel re-encryption mixnet produces output in time at most $2n$ -- and only around $n$ assuming a majority of honest servers. In contrast, a traditional, sequential, synchronous re-encryption mixnet requires time $Mn$.
+
Parallel re-encryption mixnets offer security guarantees comparable to those of synchronous mixnets, and in many cases only a slightly weaker guarantee of privacy. Our proposed construction is applicable to many recently proposed re-encryption mixnets, such as those of Furukawa and Sako, Neff, Jakobsson et al., and Golle and Boneh. In practice, parallel mixnets promise a potentially substantial time saving in applications such as anonymous electronic elections},
keywords = {anonymity, privacy},
isbn = {1-58113-961-6},
@@ -10488,8 +10475,8 @@ Parallel re-encryption mixnets offer security guarantees comparable to those of
pages = {266--280},
publisher = {Springer-Verlag, LNCS 3110},
organization = {Springer-Verlag, LNCS 3110},
- abstract = {We consider the problem of unlinkability of communication: given n users, each sending a message to some destination, encode and route the messages so that an adversary analyzing the traffic in the communication network cannot link the senders with the recipients. A solution should have a small communication overhead, that is, the number of additional messages should be kept low.
-David Chaum introduced the idea of mixes for solving this problem. His approach was developed further by Simon and Rackoff, and implemented later as the onion protocol. Even if the onion protocol is widely regarded as secure and used in practice, formal arguments supporting this claim are rare and far from being complete. On top of that, in certain scenarios very simple tricks suffice to break security without breaking the cryptographic primitives. It turns out that one source of difficulties in analyzing the onion protocol{\textquoteright}s security is the adversary model. In a recent work, Berman, Fiat and Ta-Shma develop a new and more realistic model in which only a constant fraction of communication lines can be accessed by an adversary, the number of messages does not need to be high and the preferences of the users are taken into account. For this model they prove that with high probability a good level of unlinkability is obtained after a certain number of steps of the onion protocol, where n is the number of messages sent.
+ abstract = {We consider the problem of unlinkability of communication: given n users, each sending a message to some destination, encode and route the messages so that an adversary analyzing the traffic in the communication network cannot link the senders with the recipients. A solution should have a small communication overhead, that is, the number of additional messages should be kept low.
+David Chaum introduced the idea of mixes for solving this problem. His approach was developed further by Simon and Rackoff, and implemented later as the onion protocol. Even if the onion protocol is widely regarded as secure and used in practice, formal arguments supporting this claim are rare and far from being complete. On top of that, in certain scenarios very simple tricks suffice to break security without breaking the cryptographic primitives. It turns out that one source of difficulties in analyzing the onion protocol{\textquoteright}s security is the adversary model. In a recent work, Berman, Fiat and Ta-Shma develop a new and more realistic model in which only a constant fraction of communication lines can be accessed by an adversary, the number of messages does not need to be high and the preferences of the users are taken into account. For this model they prove that with high probability a good level of unlinkability is obtained after a certain number of steps of the onion protocol, where n is the number of messages sent.
In this paper we improve these results: we show that the same level of unlinkability (expressed as variation distance between certain probability distributions) is obtained with high probability already after $O(\log n)$ steps of the onion protocol. Asymptotically, this is the best result possible, since obviously $\Omega(\log n)$ steps are necessary. On top of that, our analysis is much simpler. It is based on the path coupling technique designed for showing rapid mixing of Markov chains},
keywords = {anonymity, Markov chain, path coupling, rapid mixing, unlinkability},
isbn = {978-3-540-23208-7},
@@ -10564,7 +10551,7 @@ In this paper we improve these results: we show that the same level of unlinkabi
year = {2004},
month = {May},
pages = {51--63},
- abstract = {We define a new type of mix network that offers a reduced form of robustness: the mixnet can prove that every message it outputs corresponds to an input submitted by a player without revealing which input (for honest players). We call mixnets with this property reputable mixnets. Reputable mixnets are not fully robust, because they offer no guarantee that distinct outputs correspond to distinct inputs. In particular, a reputable mix may duplicate or erase messages. A reputable mixnet, however, can defend itself against charges of having authored the output messages it produces. This ability is very useful in practice, as it shields the mixnet from liability in the event that an output message is objectionable or illegal.
+ abstract = {We define a new type of mix network that offers a reduced form of robustness: the mixnet can prove that every message it outputs corresponds to an input submitted by a player without revealing which input (for honest players). We call mixnets with this property reputable mixnets. Reputable mixnets are not fully robust, because they offer no guarantee that distinct outputs correspond to distinct inputs. In particular, a reputable mix may duplicate or erase messages. A reputable mixnet, however, can defend itself against charges of having authored the output messages it produces. This ability is very useful in practice, as it shields the mixnet from liability in the event that an output message is objectionable or illegal.
We propose three very efficient protocols for reputable mixnets, all synchronous. The first protocol is based on blind signatures. It works with both Chaumian decryption mixnets and re-encryption mixnets based on ElGamal, but guarantees a slightly weaker form of reputability which we call near-reputability. The other two protocols are based on ElGamal re-encryption over a composite group and offer true reputability. One requires interaction between the mixnet and the players before players submit their inputs. The other assumes no interaction prior to input submission},
keywords = {anonymity, privacy},
isbn = {978-3-540-26203-9},
@@ -10643,7 +10630,7 @@ We propose three very efficient protocols for reputable mixnets, all synchronous
organization = {Springer Berlin / Heidelberg},
type = {publication},
address = {Lawrence, Kansas},
- abstract = {Programmable networks aim at the fast and flexible creation of services within a network. Often cited examples are audio and video transcoding, application layer multicast, or mobility and resilience support. In order to become commercially viable, programmable networks must provide authentication, authorization and accounting functionality. The mechanisms used to achieve these functionalities must be secure, reliable, and scalable, to be used in production-scale programmable networks. Additionally, programmable nodes must resist various kinds of attacks, such as denial of service or replay attacks. Fraudulent use by individual users must also be prohibited.
+ abstract = {Programmable networks aim at the fast and flexible creation of services within a network. Often cited examples are audio and video transcoding, application layer multicast, or mobility and resilience support. In order to become commercially viable, programmable networks must provide authentication, authorization and accounting functionality. The mechanisms used to achieve these functionalities must be secure, reliable, and scalable, to be used in production-scale programmable networks. Additionally, programmable nodes must resist various kinds of attacks, such as denial of service or replay attacks. Fraudulent use by individual users must also be prohibited.
This paper describes the design and implementation of a secure, reliable, and scalable signaling mechanism clients can use to initiate service startup and to manage services running on the nodes of a programmable network. This mechanism is designed for production-scale networks with AAA functionality},
keywords = {programmable networks, secrecy},
isbn = {978-3-540-71499-6},
@@ -10688,8 +10675,8 @@ This paper describes the design and implementation of a secure, reliable, and sc
pages = {188--200},
publisher = {ACM Press},
organization = {ACM Press},
- abstract = {Developing sensor network applications demands a new set of tools to aid programmers. A number of simulation environments have been developed that provide varying degrees of scalability, realism, and detail for understanding the behavior of sensor networks. To date, however, none of these tools have addressed one of the most important aspects of sensor application design: that of power consumption. While simple approximations of overall power usage can be derived from estimates of node duty cycle and communication rates, these techniques often fail to capture the detailed, low-level energy requirements of the CPU, radio, sensors, and other peripherals.
-
+ abstract = {Developing sensor network applications demands a new set of tools to aid programmers. A number of simulation environments have been developed that provide varying degrees of scalability, realism, and detail for understanding the behavior of sensor networks. To date, however, none of these tools have addressed one of the most important aspects of sensor application design: that of power consumption. While simple approximations of overall power usage can be derived from estimates of node duty cycle and communication rates, these techniques often fail to capture the detailed, low-level energy requirements of the CPU, radio, sensors, and other peripherals.
+
In this paper, we present PowerTOSSIM, a scalable simulation environment for wireless sensor networks that provides an accurate, per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an event-driven simulation environment for TinyOS applications. In PowerTOSSIM, TinyOS components corresponding to specific hardware peripherals (such as the radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each device{\textquoteright}s activity during the simulation run. PowerTOSSIM employs a novel code-transformation technique to estimate the number of CPU cycles executed by each node, eliminating the need for expensive instruction-level simulation of sensor nodes. PowerTOSSIM includes a detailed model of hardware energy consumption based on the Mica2 sensor node platform. Through instrumentation of actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation of power consumption for a range of applications and scales to support very large simulations},
keywords = {sensor networks, TinyOS},
doi = {10.1145/1031495.1031518},
@@ -10803,7 +10790,7 @@ In this paper, we present, a scalable simulation environment for wireless sensor
booktitle = {In NSDI},
year = {2004},
pages = {337--350},
- abstract = {Availability is a storage system property that is both highly desired and yet minimally engineered. While many systems provide mechanisms to improve availability--such as redundancy and failure recovery--how to best configure these mechanisms is typically left to the system manager. Unfortunately, few individuals have the skills to properly manage the trade-offs involved, let alone the time to adapt these decisions to changing conditions. Instead, most systems are configured statically and with only a cursory understanding of how the configuration will impact overall performance or availability. While this issue can be problematic even for individual storage arrays, it becomes increasingly important as systems are distributed--and absolutely critical for the wide-area peer-to-peer storage infrastructures being explored.
+ abstract = {Availability is a storage system property that is both highly desired and yet minimally engineered. While many systems provide mechanisms to improve availability--such as redundancy and failure recovery--how to best configure these mechanisms is typically left to the system manager. Unfortunately, few individuals have the skills to properly manage the trade-offs involved, let alone the time to adapt these decisions to changing conditions. Instead, most systems are configured statically and with only a cursory understanding of how the configuration will impact overall performance or availability. While this issue can be problematic even for individual storage arrays, it becomes increasingly important as systems are distributed--and absolutely critical for the wide-area peer-to-peer storage infrastructures being explored.
This paper describes the motivation, architecture and implementation for a new peer-to-peer storage system, called TotalRecall, that automates the task of availability management. In particular, the TotalRecall system automatically measures and estimates the availability of its constituent host components, predicts their future availability based on past behavior, calculates the appropriate redundancy mechanisms and repair policies, and delivers user-specified availability while maximizing efficiency},
keywords = {P2P},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.9775},
@@ -10849,7 +10836,7 @@ This paper describes the motivation, architecture and implementation for a new p
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
address = {San Francisco, USA},
- abstract = {We introduce a new cryptographic technique that we call universal re-encryption. A conventional cryptosystem that permits re-encryption, such as ElGamal, does so only for a player with knowledge of the public key corresponding to a given ciphertext. In contrast, universal re-encryption can be done without knowledge of public keys. We propose an asymmetric cryptosystem with universal re-encryption that is half as efficient as standard ElGamal in terms of computation and storage.
+ abstract = {We introduce a new cryptographic technique that we call universal re-encryption. A conventional cryptosystem that permits re-encryption, such as ElGamal, does so only for a player with knowledge of the public key corresponding to a given ciphertext. In contrast, universal re-encryption can be done without knowledge of public keys. We propose an asymmetric cryptosystem with universal re-encryption that is half as efficient as standard ElGamal in terms of computation and storage.
While technically and conceptually simple, universal re-encryption leads to new types of functionality in mixnet architectures. Conventional mixnets are often called upon to enable players to communicate with one another through channels that are externally anonymous, i.e., that hide information permitting traffic-analysis. Universal re-encryption lets us construct a mixnet of this kind in which servers hold no public or private keying material, and may therefore dispense with the cumbersome requirements of key generation, key distribution, and private-key management. We describe two practical mixnet constructions, one involving asymmetric input ciphertexts, and another with hybrid-ciphertext inputs},
keywords = {anonymity, private channels, universal re-encryption},
isbn = {978-3-540-20996-6},
@@ -11133,8 +11120,8 @@ While technically and conceptually simple, universal re-encryption leads to new
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {We present a novel peer-to-peer backup technique that allows computers connected to the Internet to back up their data cooperatively: Each computer has a set of partner computers, which collectively hold its backup data. In return, it holds a part of each partner{\textquoteright}s backup data. By adding redundancy and distributing the backup data across many partners, a highly-reliable backup can be obtained in spite of the low reliability of the average Internet machine.
-
+ abstract = {We present a novel peer-to-peer backup technique that allows computers connected to the Internet to back up their data cooperatively: Each computer has a set of partner computers, which collectively hold its backup data. In return, it holds a part of each partner{\textquoteright}s backup data. By adding redundancy and distributing the backup data across many partners, a highly-reliable backup can be obtained in spite of the low reliability of the average Internet machine.
+
Because our scheme requires cooperation, it is potentially vulnerable to several novel attacks involving free riding (e.g., holding a partner{\textquoteright}s data is costly, which tempts cheating) or disruption. We defend against these attacks using a number of new methods, including the use of periodic random challenges to ensure partners continue to hold data and the use of disk-space wasting to make cheating unprofitable. Results from an initial prototype show that our technique is feasible and very inexpensive: it appears to be one to two orders of magnitude cheaper than existing Internet backup services},
keywords = {backup, P2P, redundancy},
www_section = {http://portal.acm.org/citation.cfm?id=1247343$\#$},
@@ -11202,8 +11189,8 @@ Because our scheme requires cooperation, it is potentially vulnerable to several
@booklet {_,
title = {A DHT-based Backup System},
year = {2003},
- abstract = {Distributed hashtables have been proposed as a way to simplify the construction of large-scale distributed applications (e.g. [1,6]). DHTs are completely decentralized systems that provide block storage on a changing collection of nodes spread throughout the Internet. Each block is identified by a unique key. DHTs spread the load of storing and serving blocks across all of the active nodes and keep the blocks available as nodes join and leave the system.
-
+ abstract = {Distributed hashtables have been proposed as a way to simplify the construction of large-scale distributed applications (e.g. [1,6]). DHTs are completely decentralized systems that provide block storage on a changing collection of nodes spread throughout the Internet. Each block is identified by a unique key. DHTs spread the load of storing and serving blocks across all of the active nodes and keep the blocks available as nodes join and leave the system.
+
This paper presents the design and implementation of a cooperative off-site backup system, Venti-DHash. Venti-DHash is based on a DHT infrastructure and is designed to support recovery of data after a disaster by keeping regular snapshots of filesystems distributed off-site, on peers on the Internet. Whereas conventional backup systems incur significant equipment costs, manual effort and high administrative overhead, we hope that a distributed backup system can alleviate these problems, making backups easy and feasible. By building this system on top of a DHT, the backup application inherits the properties of the DHT, and serves to evaluate the feasibility of using a DHT to build large-scale applications},
keywords = {backup, distributed hash table},
www_section = {http://doc.cat-v.org/plan_9/misc/venti-dhash/},
@@ -11313,11 +11300,11 @@ This paper presents the design and implementation of a cooperative off-site back
year = {2003},
month = {June},
publisher = {Vieweg-Verlag},
- abstract = {This paper describes economic aspects of GNUnet, a peer-to-peer framework for anonymous distributed file-sharing. GNUnet is decentralized; all nodes are equal peers. In particular, there are no trusted entities in the network. This paper describes an economic model to perform resource allocation and defend against malicious
-participants in this context. The approach presented does not use credentials or payments; rather, it is based on trust. The design is much like that of a cooperative game in which peers take the role of players. Nodes must cooperate to achieve individual goals. In such a scenario, it is important to be able to distinguish between nodes exhibiting friendly behavior and those exhibiting malicious behavior.
-
-GNUnet aims to provide anonymity for its users. Its design
-makes it hard to link a transaction to the node where it originated. While anonymity requirements make a global view of the end-points of a transaction infeasible, the local link-to-link messages can be fully authenticated. Our economic model is based entirely on this local view of the network and takes only local
+ abstract = {This paper describes economic aspects of GNUnet, a peer-to-peer framework for anonymous distributed file-sharing. GNUnet is decentralized; all nodes are equal peers. In particular, there are no trusted entities in the network. This paper describes an economic model to perform resource allocation and defend against malicious
+participants in this context. The approach presented does not use credentials or payments; rather, it is based on trust. The design is much like that of a cooperative game in which peers take the role of players. Nodes must cooperate to achieve individual goals. In such a scenario, it is important to be able to distinguish between nodes exhibiting friendly behavior and those exhibiting malicious behavior.
+
+GNUnet aims to provide anonymity for its users. Its design
+makes it hard to link a transaction to the node where it originated. While anonymity requirements make a global view of the end-points of a transaction infeasible, the local link-to-link messages can be fully authenticated. Our economic model is based entirely on this local view of the network and takes only local
decisions},
keywords = {anonymity, file-sharing, GNUnet},
www_section = {http://grothoff.org/christian/ebe.pdf},
@@ -11337,7 +11324,7 @@ decisions},
title = {Extremum Feedback with Partial Knowledge},
volume = {Volume 2816/2003},
year = {2003},
- abstract = {A scalable feedback mechanism to solicit feedback from a potentially very large group of networked nodes is an important building block for many network protocols. Multicast transport protocols use it for negative acknowledgements and for delay and packet loss determination. Grid computing and peer-to-peer applications can use similar approaches to find nodes that are, at a given moment in time, best suited to serve a request. In sensor networks, such mechanisms allow extreme values to be reported in a resource-efficient way.
+ abstract = {A scalable feedback mechanism to solicit feedback from a potentially very large group of networked nodes is an important building block for many network protocols. Multicast transport protocols use it for negative acknowledgements and for delay and packet loss determination. Grid computing and peer-to-peer applications can use similar approaches to find nodes that are, at a given moment in time, best suited to serve a request. In sensor networks, such mechanisms allow extreme values to be reported in a resource-efficient way.
In this paper we analyze several extensions to the exponential feedback algorithm [5,6] that provide an optimal way to collect extreme values from a potentially very large group of networked nodes. In contrast to prior work, we focus on how knowledge about the value distribution in the group can be used to optimize the feedback process. We describe the trade-offs that have to be decided upon when using these extensions and provide additional insight into their performance by means of simulation. Furthermore, we briefly illustrate how sample applications can benefit from the proposed mechanisms},
isbn = {978-3-540-20051-2},
issn = {0302-9743 },
@@ -11371,9 +11358,9 @@ In this paper we analyze several extensions to the exponential feedback algorith
pages = {141--160},
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
- abstract = {This paper describes how anonymity is achieved in GNUnet, a framework for anonymous distributed and secure networking.
-
-The main focus of this work is gap, a simple protocol for anonymous transfer of data which can achieve better anonymity guarantees than many traditional indirection schemes and is additionally more efficient. gap is based on a new perspective on how to achieve anonymity. Based on this new perspective it is possible to relax the requirements stated in traditional indirection
+ abstract = {This paper describes how anonymity is achieved in GNUnet, a framework for anonymous distributed and secure networking.
+
+The main focus of this work is gap, a simple protocol for anonymous transfer of data which can achieve better anonymity guarantees than many traditional indirection schemes and is additionally more efficient. gap is based on a new perspective on how to achieve anonymity. Based on this new perspective it is possible to relax the requirements stated in traditional indirection
schemes, allowing individual nodes to balance anonymity with efficiency according to their specific needs},
keywords = {anonymity, GNUnet, installation},
www_section = {http://grothoff.org/christian/aff.pdf},
@@ -11388,7 +11375,7 @@ schemes, allowing individual nodes to balance anonymity with efficiency accordin
pages = {18--31},
publisher = {Springer-Verlag, LNCS 2760},
organization = {Springer-Verlag, LNCS 2760},
- abstract = {In this paper we present a generalised framework for expressing batching strategies of a mix. First, we note that existing mixes can be represented as functions from the number of messages in the mix to the fraction of messages to be flushed.
+ abstract = {In this paper we present a generalised framework for expressing batching strategies of a mix. First, we note that existing mixes can be represented as functions from the number of messages in the mix to the fraction of messages to be flushed.
We then show how to express existing mixes in the framework, and then suggest other mixes which arise out of that framework. We note that these cannot be expressed as pool mixes. In particular, we call binomial mix a timed pool mix that tosses coins and uses a probability function that depends on the number of messages inside the mix at the time of flushing. We discuss the properties of this mix},
keywords = {mix},
isbn = {978-3-540-20610-1},
@@ -11434,7 +11421,7 @@ We then show how to express existing mixes in the framework, and then suggest ot
pages = {0--187},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
- abstract = {The routing algorithm has great influence on overall system performance in Peer-to-Peer (P2P) applications. In current DHT-based routing algorithms, routing tasks are distributed across all system peers. However, a routing hop can happen between two widely separated peers with high network link latency, which greatly increases system routing overheads.
+ abstract = {The routing algorithm has great influence on overall system performance in Peer-to-Peer (P2P) applications. In current DHT-based routing algorithms, routing tasks are distributed across all system peers. However, a routing hop can happen between two widely separated peers with high network link latency, which greatly increases system routing overheads.
In this paper, we propose a new P2P routing algorithm, HIERAS, to relieve this problem; it keeps the scalability property of current DHT algorithms and improves system routing performance by the introduction of a hierarchical structure. In HIERAS, we create several lower-level P2P rings besides the highest-level P2P ring. A P2P ring is a subset of the overall P2P overlay network. We create P2P rings in such a way that the average link latency between two peers in lower-level rings is much smaller than in higher-level rings. Routing tasks are first executed in lower-level rings before they go up to higher-level rings; a large portion of the routing hops previously executed in the global P2P ring are now replaced by hops in lower-level rings, thus routing overheads can be reduced. The simulation results show the HIERAS routing algorithm can significantly improve P2P system routing performance},
keywords = {distributed hash table, P2P},
issn = {0190-3918},
@@ -11651,10 +11638,10 @@ In this paper, we propose a new P2P routing algorithm--- HIERAS to relieve this
title = {Mixmaster Protocol --- Version 2},
year = {2003},
month = {July},
- abstract = {Most e-mail security protocols only protect the message body, leaving useful information such as the identities of the conversing parties, sizes of messages and frequency of message exchange open to adversaries. This document describes Mixmaster (version 2), a mail transfer protocol designed to protect electronic mail against traffic
-analysis.
-
-Mixmaster is based on D. Chaum{\textquoteright}s mix-net protocol. A mix (remailer) is a service that forwards messages, using public key
+ abstract = {Most e-mail security protocols only protect the message body, leaving useful information such as the identities of the conversing parties, sizes of messages and frequency of message exchange open to adversaries. This document describes Mixmaster (version 2), a mail transfer protocol designed to protect electronic mail against traffic
+analysis.
+
+Mixmaster is based on D. Chaum{\textquoteright}s mix-net protocol. A mix (remailer) is a service that forwards messages, using public key
cryptography to hide the correlation between its inputs and outputs. Sending messages through sequences of remailers achieves anonymity and unobservability of communications against a powerful adversary},
keywords = {electronic mail, public key cryptography, traffic analysis},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/freehaven.net-anonbib-cache-mixmaster-spec.txt.pdf},
@@ -11824,7 +11811,7 @@ cryptography to hide the correlation between its inputs and outputs. Sending mes
month = {October},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {In this paper we consider low latency connection-based anonymity systems which can be used for applications like web browsing or SSH. Although several such systems have been designed and built, their anonymity has so far not been adequately evaluated.
+ abstract = {In this paper we consider low latency connection-based anonymity systems which can be used for applications like web browsing or SSH. Although several such systems have been designed and built, their anonymity has so far not been adequately evaluated.
We analyse the anonymity of connection-based systems against passive adversaries. We give a precise description of two attacks, evaluate their effectiveness, and calculate the amount of traffic necessary to provide a minimum degree of protection against them},
keywords = {anonymity},
isbn = {978-3-540-20300-1},
@@ -11942,7 +11929,7 @@ We analyse the anonymity of connection-based systems against passive adversaries
month = {April},
publisher = {Springer-Verlag, LNCS 2612},
organization = {Springer-Verlag, LNCS 2612},
- abstract = {Mix chains as proposed by Chaum allow sending untraceable electronic e-mail without requiring trust in a single authority: messages are recursively public-key encrypted to multiple intermediates (mixes), each of which forwards the message after removing one layer of encryption. To conceal as much information as possible when using variable (source routed) chains, all messages passed to mixes should be of the same length; thus, message length should not decrease when a mix transforms an input message into the corresponding output message directed at the next mix in the chain. Chaum described an implementation
+ abstract = {Mix chains as proposed by Chaum allow sending untraceable electronic e-mail without requiring trust in a single authority: messages are recursively public-key encrypted to multiple intermediates (mixes), each of which forwards the message after removing one layer of encryption. To conceal as much information as possible when using variable (source routed) chains, all messages passed to mixes should be of the same length; thus, message length should not decrease when a mix transforms an input message into the corresponding output message directed at the next mix in the chain. Chaum described an implementation
for such length-preserving mixes, but it is not secure against active attacks. We show how to build practical cryptographically secure length-preserving mixes. The conventional definition of security against chosen ciphertext attacks is not applicable to length-preserving mixes; we give an appropriate definition and show that our construction achieves provable security},
keywords = {mix chain, public key cryptography},
www_section = {http://eprints.kfupm.edu.sa/59837/},
@@ -12002,8 +11989,8 @@ for such length-preserving mixes, but it is not secure against active attacks. W
month = {October},
publisher = {Springer Berlin / Heidelberg},
organization = {Springer Berlin / Heidelberg},
- abstract = {Recently, David Chaum proposed an electronic voting scheme that combines visual cryptography and digital processing. It was designed to meet not only mathematical security standards, but also to be accepted by voters that do not trust electronic devices.
-In this scheme mix-servers are used to guarantee anonymity of the votes in the counting process. The mix-servers are operated by different parties, so evidence of their correct operation is necessary. For this purpose the protocol uses randomized partial checking of Jakobsson et al., where some randomly selected connections between the (encoded) inputs and outputs of a mix-server are revealed. This leaks some information about the ballots, even if intuitively this information cannot be used for any efficient attack.
+ abstract = {Recently, David Chaum proposed an electronic voting scheme that combines visual cryptography and digital processing. It was designed to meet not only mathematical security standards, but also to be accepted by voters that do not trust electronic devices.
+In this scheme mix-servers are used to guarantee anonymity of the votes in the counting process. The mix-servers are operated by different parties, so evidence of their correct operation is necessary. For this purpose the protocol uses randomized partial checking of Jakobsson et al., where some randomly selected connections between the (encoded) inputs and outputs of a mix-server are revealed. This leaks some information about the ballots, even if intuitively this information cannot be used for any efficient attack.
We provide a rigorous stochastic analysis of how much information is revealed by randomized partial checking in Chaum{\textquoteright}s protocol. We estimate how many mix-servers are necessary for a fair security level. Namely, we consider the probability distribution of the permutations linking the encoded votes with the decoded votes, given the information revealed by randomized partial checking. We show that the variation distance between this distribution and the uniform distribution becomes small already for a constant number of mix-servers (n is the number of voters). This means that a constant number of trustees in Chaum{\textquoteright}s protocol is enough to obtain provable security. The analysis also shows that certain details of Chaum{\textquoteright}s protocol can be simplified without lowering the security level},
keywords = {electronic voting, Markov chain, path coupling, randomized partial checking, rapid mixing},
isbn = {978-3-540-20300-1},
@@ -12368,7 +12355,7 @@ We provide a rigorous stochastic analysis of how much information is revealed by
pages = {125--140},
publisher = {Springer-Verlag, LNCS 2760},
organization = {Springer-Verlag, LNCS 2760},
- abstract = {All existing anti-censorship systems for the Web rely on proxies to grant clients access to censored information. Therefore, they face the proxy discovery problem: how can clients discover the proxies without having the censor discover and block these proxies? To avoid widespread discovery and blocking, proxies must not be widely published and should be discovered in-band. In this paper, we present a proxy discovery mechanism called keyspace hopping that meets this goal. Similar in spirit to frequency hopping in wireless networks, keyspace hopping ensures that each client discovers only a small fraction of the total number of proxies. However, requiring clients to independently discover proxies from a large set makes it practically impossible to verify the trustworthiness of every proxy and creates the possibility of having untrusted proxies. To address
+ abstract = {All existing anti-censorship systems for the Web rely on proxies to grant clients access to censored information. Therefore, they face the proxy discovery problem: how can clients discover the proxies without having the censor discover and block these proxies? To avoid widespread discovery and blocking, proxies must not be widely published and should be discovered in-band. In this paper, we present a proxy discovery mechanism called keyspace hopping that meets this goal. Similar in spirit to frequency hopping in wireless networks, keyspace hopping ensures that each client discovers only a small fraction of the total number of proxies. However, requiring clients to independently discover proxies from a large set makes it practically impossible to verify the trustworthiness of every proxy and creates the possibility of having untrusted proxies. To address
this, we propose separating the proxy into two distinct components: the messenger, which the client discovers using keyspace hopping and which simply acts as a gateway to the Internet; and the portal, whose identity is widely published and whose responsibility it is to interpret and serve the client{\textquoteright}s requests for censored content. We show how this separation, as well as in-band proxy discovery, can be applied to a variety of anti-censorship systems},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/feamster-pet2003.pdf},
author = {Nick Feamster and Magdalena Balazinska and Winston Wang and Hari Balakrishnan and David Karger},
@@ -12558,7 +12545,7 @@ this, we propose separating the proxy into two distinct components|the messenger
month = mar,
publisher = {Springer-Verlag London, UK},
organization = {Springer-Verlag London, UK},
- abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship resistant system with user, server and active-server document anonymity as well as efficient document retrieval. The retrieval service is layered on top of an existing Peer-to-Peer infrastructure, which should facilitate its implementation. The key idea is to separate the role of document storers from the machines visible to the users, which makes each individual part of the system less prone to attacks, and therefore to censorship.
+ abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship resistant system with user, server and active-server document anonymity as well as efficient document retrieval. The retrieval service is layered on top of an existing Peer-to-Peer infrastructure, which should facilitate its implementation. The key idea is to separate the role of document storers from the machines visible to the users, which makes each individual part of the system less prone to attacks, and therefore to censorship.
Indeed, if one server has been pressured into removing content, the other server administrators may simply follow the precedent and remove the offending content themselves},
keywords = {anonymity, censorship resistance, P2P},
isbn = {978-3-540-44179-3},
@@ -12575,7 +12562,7 @@ Indeed, if one server has been pressured into removal, the other server administ
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK},
- abstract = {AMnet provides a framework for flexible and rapid service creation. It is based on Programmable Networking technologies and uses active nodes (AMnodes) within the network for the provision of individual, application-specific services. To this end, these AMnodes execute service modules that are loadable on-demand and enhance the functionality of intermediate systems without the need of long global standardization processes.
+ abstract = {AMnet provides a framework for flexible and rapid service creation. It is based on Programmable Networking technologies and uses active nodes (AMnodes) within the network for the provision of individual, application-specific services. To this end, these AMnodes execute service modules that are loadable on-demand and enhance the functionality of intermediate systems without the need of long global standardization processes.
Placing application-dedicated functionality within the network requires a flexible signaling protocol to discover and announce as well as to establish and maintain the corresponding services. AMnet Signaling was developed for this purpose and will be presented in detail within this paper},
keywords = {multicast, programmable networks},
isbn = {3-540-43709-6},
@@ -12848,8 +12835,8 @@ Placing application-dedicated functionality within the network requires a flexib
month = {April},
publisher = {Springer-Verlag, LNCS 2482},
organization = {Springer-Verlag, LNCS 2482},
- abstract = {In this paper we propose a method to prevent so-called {\textquotedblleft}intersection attacks{\textquotedblright} on anonymity services. Intersection attacks are possible if not all users of such a service are active all the time and part of the transferred messages are linkable. Especially in real systems, the group of users (anonymity set) will change over time due to online and off-line periods.
-Our proposed solution is to send pregenerated dummy messages to the communication partner (e.g. the web server), during the user{\textquoteright}s off-line periods.
+ abstract = {In this paper we propose a method to prevent so-called {\textquotedblleft}intersection attacks{\textquotedblright} on anonymity services. Intersection attacks are possible if not all users of such a service are active all the time and part of the transferred messages are linkable. Especially in real systems, the group of users (anonymity set) will change over time due to online and off-line periods.
+Our proposed solution is to send pregenerated dummy messages to the communication partner (e.g. the web server), during the user{\textquoteright}s off-line periods.
For a detailed description of our method we assume a cascade of Chaumian MIXes as the anonymity service, and we respect and fulfill the MIX attacker model},
keywords = {anonymity service, intersection attacks},
isbn = {978-3-540-00565-0},
@@ -12943,7 +12930,7 @@ For a detailed description of our method we assume a cascade of Chaumian MIXes a
booktitle = {in International Workshop on Future Directions in Distributed Computing (FuDiCo)},
year = {2002},
pages = {52--55},
- abstract = {Self-organizing peer-to-peer (p2p) overlay networks like CAN, Chord, Pastry and Tapestry (also called distributed hash tables or DHTs) offer a novel platform for a variety of scalable and decentralized distributed applications. These systems provide efficient and fault-tolerant routing, object location, and load balancing within a self-organizing overlay network. One important aspect of these systems is how they exploit network proximity in the underlying Internet. Three basic approaches have been proposed to exploit network proximity in DHTs: geographic layout, proximity routing and proximity neighbour selection. In this position paper, we briefly discuss the three approaches, contrast their strengths and shortcomings, and consider their applicability
+ abstract = {Self-organizing peer-to-peer (p2p) overlay networks like CAN, Chord, Pastry and Tapestry (also called distributed hash tables or DHTs) offer a novel platform for a variety of scalable and decentralized distributed applications. These systems provide efficient and fault-tolerant routing, object location, and load balancing within a self-organizing overlay network. One important aspect of these systems is how they exploit network proximity in the underlying Internet. Three basic approaches have been proposed to exploit network proximity in DHTs: geographic layout, proximity routing and proximity neighbour selection. In this position paper, we briefly discuss the three approaches, contrast their strengths and shortcomings, and consider their applicability
in the different DHT routing protocols. We conclude that proximity neighbor selection, when used in DHTs with prefix-based routing like Pastry and Tapestry, is highly effective and appears to dominate the other approaches},
keywords = {CAN, distributed hash table, P2P},
www_section = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.126.3062},
@@ -12985,8 +12972,8 @@ in the different DHT routing protocols. We conclude that proximity neighbor sele
publisher = {ACM},
organization = {ACM},
address = {San Diego, CA, USA},
- abstract = {Internet users increasingly rely on publicly available data for everything from software installation to investment decisions. Unfortunately, the vast majority of public content on the Internet comes with no integrity or authenticity guarantees. This paper presents the self-certifying read-only file system, a content distribution system providing secure, scalable access to public, read-only data.
-
+ abstract = {Internet users increasingly rely on publicly available data for everything from software installation to investment decisions. Unfortunately, the vast majority of public content on the Internet comes with no integrity or authenticity guarantees. This paper presents the self-certifying read-only file system, a content distribution system providing secure, scalable access to public, read-only data.
+
The read-only file system makes the security of published content independent from that of the distribution infrastructure. In a secure area (perhaps off-line), a publisher creates a digitally-signed database out of a file system{\textquoteright}s contents. The publisher then replicates the database on untrusted content-distribution servers, allowing for high availability. The read-only file system protocol furthermore pushes the cryptographic cost of content verification entirely onto clients, allowing servers to scale to a large number of clients. Measurements of an implementation show that an individual server running on a 550 MHz Pentium III with FreeBSD can support 1,012 connections per second and 300 concurrent clients compiling a large software package},
keywords = {file systems, read-only, security},
issn = {0734-2071},
@@ -13297,8 +13284,8 @@ The read-only file system makes the security of published content independent fr
month = {August},
publisher = {USENIX Association Berkeley, CA, USA},
organization = {USENIX Association Berkeley, CA, USA},
- abstract = { We propose a new technique for making mix nets robust, called randomized partial checking (RPC). The basic idea is that rather than providing a proof of completely correct operation, each server provides strong evidence of its correct operation by revealing a pseudo-randomly selected subset of its input/output relations.
-Randomized partial checking is exceptionally efficient compared to previous proposals for providing robustness; the evidence provided at each layer is shorter than the output of that layer, and producing the evidence is easier than doing the mixing. It works with mix nets based on any encryption scheme (i.e., on public-key alone, and on hybrid schemes using public-key/symmetric-key combinations). It also works both with Chaumian mix nets where the messages are successively encrypted with each server{\textquoteright}s key, and with mix nets based on a single public key with randomized re-encryption at each layer.
+ abstract = { We propose a new technique for making mix nets robust, called randomized partial checking (RPC). The basic idea is that rather than providing a proof of completely correct operation, each server provides strong evidence of its correct operation by revealing a pseudo-randomly selected subset of its input/output relations.
+Randomized partial checking is exceptionally efficient compared to previous proposals for providing robustness; the evidence provided at each layer is shorter than the output of that layer, and producing the evidence is easier than doing the mixing. It works with mix nets based on any encryption scheme (i.e., on public-key alone, and on hybrid schemes using public-key/symmetric-key combinations). It also works both with Chaumian mix nets where the messages are successively encrypted with each server{\textquoteright}s key, and with mix nets based on a single public key with randomized re-encryption at each layer.
Randomized partial checking is particularly well suited for voting systems, as it ensures voter privacy and provides assurance of correct operation. Voter privacy is ensured (either probabilistically or cryptographically) with appropriate design and parameter selection. Unlike previous work, our work provides voter privacy as a global property of the mix net rather than as a property ensured by a single honest server. RPC-based mix nets also provide high assurance of a correct election result, since a corrupt server is very likely to be caught if it attempts to tamper with even a couple of ballots},
keywords = {electronic voting, public verifiability, randomized partial checking, shuffle network},
isbn = {1-931971-00-5},
@@ -13337,12 +13324,12 @@ Randomized partial checking is particularly well suited for voting systems, as i
@booklet { dwork02memorybound,
title = {On memory-bound functions for fighting spam},
year = {2002},
- abstract = {In 1992, Dwork and Naor proposed that e-mail messages be accompanied by easy-to-check proofs of computational effort in order to discourage junk e-mail, now known as spam. They proposed specific CPU-bound functions for this purpose. Burrows suggested that, since memory access speeds vary across machines much less than do CPU speeds, memory-bound functions may behave more equitably than CPU-bound functions; this approach was first explored by Abadi, Burrows, Manasse, and Wobber [5].
-We further investigate this intriguing proposal. Specifically, we
-1) Provide a formal model of computation and a statement of the problem;
-2) Provide an abstract function and prove an asymptotically tight amortized lower bound on the number of memory accesses required to compute an acceptable proof of effort; specifically, we prove that, on average, the sender of a message must perform many unrelated accesses to memory, while the receiver, in order to verify the work, has to perform significantly fewer accesses;
-3) Propose a concrete instantiation of our abstract function, inspired by the RC4 stream cipher;
-4) Describe techniques to permit the receiver to verify the computation with no memory accesses;
+ abstract = {In 1992, Dwork and Naor proposed that e-mail messages be accompanied by easy-to-check proofs of computational effort in order to discourage junk e-mail, now known as spam. They proposed specific CPU-bound functions for this purpose. Burrows suggested that, since memory access speeds vary across machines much less than do CPU speeds, memory-bound functions may behave more equitably than CPU-bound functions; this approach was first explored by Abadi, Burrows, Manasse, and Wobber [5].
+We further investigate this intriguing proposal. Specifically, we
+1) Provide a formal model of computation and a statement of the problem;
+2) Provide an abstract function and prove an asymptotically tight amortized lower bound on the number of memory accesses required to compute an acceptable proof of effort; specifically, we prove that, on average, the sender of a message must perform many unrelated accesses to memory, while the receiver, in order to verify the work, has to perform significantly fewer accesses;
+3) Propose a concrete instantiation of our abstract function, inspired by the RC4 stream cipher;
+4) Describe techniques to permit the receiver to verify the computation with no memory accesses;
5) Give experimental results showing that our concrete memory-bound function is only about four times slower on a 233 MHz set-top box than on a 3.06 GHz workstation, and that speedup of the function is limited even if an adversary knows the access sequence and uses optimal off-line cache replacement},
doi = {10.1007/b11817},
www_section = {citeseer.ist.psu.edu/dwork02memorybound.html},
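As a rough illustration of the memory-bound idea (a toy, not the paper's RC4-inspired function; the table size, walk length, and difficulty below are invented and far smaller than realistic), the sender performs a long data-dependent walk through a table too large for fast caches, while the receiver checks a candidate nonce with a single walk:

    import hashlib
    import os

    TABLE_BITS = 20  # 2^20 eight-byte entries; real parameters target cache sizes
    TABLE = [int.from_bytes(hashlib.sha256(i.to_bytes(4, "big")).digest()[:8], "big")
             for i in range(1 << TABLE_BITS)]

    def walk(message, nonce, steps=1024):
        # Each step's table index depends on the previous value, forcing
        # many unrelated (cache-unfriendly) memory accesses.
        acc = int.from_bytes(hashlib.sha256(message + nonce).digest()[:8], "big")
        for _ in range(steps):
            acc = TABLE[acc & ((1 << TABLE_BITS) - 1)] ^ ((acc * 0x100000001B3) % (1 << 64))
        return acc

    def prove(message, difficulty_bits=10):
        # Sender: try nonces until the walk output has enough leading zeros;
        # expected cost is about 2^difficulty_bits walks.
        while True:
            nonce = os.urandom(8)
            if walk(message, nonce) >> (64 - difficulty_bits) == 0:
                return nonce

    def verify(message, nonce, difficulty_bits=10):
        # Receiver: a single walk suffices to check the proof of effort.
        return walk(message, nonce) >> (64 - difficulty_bits) == 0

    n = prove(b"hello")
    assert verify(b"hello", n)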
@@ -13981,7 +13968,7 @@ We further investigate this intriguing proposal. Specifically, we
pages = {276--294},
publisher = {Springer-Verlag, LNCS 1962},
organization = {Springer-Verlag, LNCS 1962},
- abstract = {Collecting accurate profile information and protecting an individual{\textquoteright}s privacy are ordinarily viewed as being at odds. This paper presents mechanisms that protect individual privacy while presenting accurate{\textemdash}indeed authenticated{\textemdash}profile information to servers and merchants. In particular, we give a pseudonym registration scheme and system that enforces unique user registration while separating the trust required of registrars, issuers, and validators. This scheme enables the issuance of global unique pseudonyms (GUPs) and attributes, enabling practical applications such as authentication of accurate attributes and enforcement of {\textquotedblleft}one-to-a-customer{\textquotedblright} properties.
+ abstract = {Collecting accurate profile information and protecting an individual{\textquoteright}s privacy are ordinarily viewed as being at odds. This paper presents mechanisms that protect individual privacy while presenting accurate{\textemdash}indeed authenticated{\textemdash}profile information to servers and merchants. In particular, we give a pseudonym registration scheme and system that enforces unique user registration while separating the trust required of registrars, issuers, and validators. This scheme enables the issuance of global unique pseudonyms (GUPs) and attributes, enabling practical applications such as authentication of accurate attributes and enforcement of {\textquotedblleft}one-to-a-customer{\textquotedblright} properties.
We also present a scheme resilient to even pseudonymous profiling yet preserving the ability of merchants to authenticate the accuracy of information. It is the first mechanism of which the authors are aware to guarantee recent validity for group signatures, and more generally multi-group signatures, thus effectively enabling revocation of all or some of the multi-group certificates held by a principal},
keywords = {privacy, pseudonym},
isbn = {978-3-540-42700-1},
@@ -14089,9 +14076,9 @@ We also present a scheme resilient to even pseudonymous profiling yet preserving
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Boston, Massachusetts, USA},
- abstract = {
-Recent advances in hardware and communication technologies have made it possible and cost-effective to share a file system among several machines over a local (but possibly also a wide) area network. One of the most successful and widely used such applications is Sun{\textquoteright}s Network File System (NFS). NFS is very simple in structure but assumes a very strong trust model: the user trusts the remote file system server (which might be running on a machine in a different country) and the network with his/her data. It is easy to see that neither assumption is a very realistic one. The server (or anybody with superuser privileges) might very well read the data on its local filesystem, and it is well known that the Internet or any local area network (e.g., Ethernet) is very easy to tap (see, for example, Berkeley{\textquoteright}s tcpdump [7, 5] application program). Impersonation of users is another security drawback of NFS. In fact, most of the permission checking over NFS is performed in the kernel of the client. In such a context a pirate can temporarily assign to his own workstation the Internet address of a victim. Without secure RPC [9], no further authentication procedure is required. From here on, the pirate can issue NFS requests presenting himself with any (false) uid and therefore read and write any private data on the server, even protected data.
-Given the above, a user seeking a certain level of security should take some measures. Possible solutions are to use either user-level cryptography or application-level cryptography. A discussion of the drawbacks of these approaches is found in [4]. A better approach is to push encryption services into the operating system, as done by M. Blaze in the design of his CFS [4].
+ abstract = {
+Recent advances in hardware and communication technologies have made it possible and cost-effective to share a file system among several machines over a local (but possibly also a wide) area network. One of the most successful and widely used such applications is Sun{\textquoteright}s Network File System (NFS). NFS is very simple in structure but assumes a very strong trust model: the user trusts the remote file system server (which might be running on a machine in a different country) and the network with his/her data. It is easy to see that neither assumption is a very realistic one. The server (or anybody with superuser privileges) might very well read the data on its local filesystem, and it is well known that the Internet or any local area network (e.g., Ethernet) is very easy to tap (see, for example, Berkeley{\textquoteright}s tcpdump [7, 5] application program). Impersonation of users is another security drawback of NFS. In fact, most of the permission checking over NFS is performed in the kernel of the client. In such a context a pirate can temporarily assign to his own workstation the Internet address of a victim. Without secure RPC [9], no further authentication procedure is required. From here on, the pirate can issue NFS requests presenting himself with any (false) uid and therefore read and write any private data on the server, even protected data.
+Given the above, a user seeking a certain level of security should take some measures. Possible solutions are to use either user-level cryptography or application-level cryptography. A discussion of the drawbacks of these approaches is found in [4]. A better approach is to push encryption services into the operating system, as done by M. Blaze in the design of his CFS [4].
In this paper, we propose a new cryptographic file system, which we call TCFS, as a suitable solution to the problem of privacy for distributed file systems (see section 2.1). Our work improves on CFS by providing a deeper integration between the encryption service and the file system, which results in complete transparency of use to the user applications},
keywords = {cryptographic file system, UNIX},
isbn = {1-880446-10-3},
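The kind of operating-system-transparent per-file encryption TCFS argues for can be imitated in a few lines of user-space Python. This is a sketch only: the keystream construction and key handling are illustrative stand-ins, not TCFS's format, and a real system would use a vetted cipher with per-file nonces.

    import hashlib

    def keystream(key, length):
        # SHA-256 in counter mode as a stand-in keystream.
        out = b""
        counter = 0
        while len(out) < length:
            out += hashlib.sha256(key + counter.to_bytes(8, "big")).digest()
            counter += 1
        return out[:length]

    def seal(key, data):
        # XOR with the keystream; applying it twice decrypts.
        return bytes(a ^ b for a, b in zip(data, keystream(key, len(data))))

    key = hashlib.sha256(b"per-file key material").digest()
    plaintext = b"data the remote NFS server never sees in the clear"
    stored = seal(key, plaintext)   # this is all that reaches the file server
    assert seal(key, stored) == plaintext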
@@ -14178,10 +14165,10 @@ In this paper, we propose a new cryptographic le system, which we call TCFS , as
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK, UK},
- abstract = {We propose a generalisation of Paillier{\textquoteright}s probabilistic public key system, in which the expansion factor is reduced and which allows one to adjust the block length of the scheme even after the public key has been fixed, without losing the homomorphic property. We show that the generalisation is as secure as Paillier{\textquoteright}s original system.
-
-We construct a threshold variant of the generalised scheme as well as zero-knowledge protocols to show that a given ciphertext encrypts one of a set of given plaintexts, and protocols to verify multiplicative relations on plaintexts.
-
+ abstract = {We propose a generalisation of Paillier{\textquoteright}s probabilistic public key system, in which the expansion factor is reduced and which allows one to adjust the block length of the scheme even after the public key has been fixed, without losing the homomorphic property. We show that the generalisation is as secure as Paillier{\textquoteright}s original system.
+
+We construct a threshold variant of the generalised scheme as well as zero-knowledge protocols to show that a given ciphertext encrypts one of a set of given plaintexts, and protocols to verify multiplicative relations on plaintexts.
+
We then show how these building blocks can be used for applying the scheme to efficient electronic voting. This dramatically reduces the work needed to compute the final result of an election, compared to the previously best known schemes. We show how the basic scheme for a yes/no vote can be easily adapted to casting a vote for up to t out of L candidates. The same basic building blocks can also be adapted to provide receipt-free elections, under appropriate physical assumptions. The scheme for 1 out of L elections can be optimised such that for a certain range of parameter values, a ballot has size only O(log L) bits},
isbn = {3-540-41658-7},
www_section = {http://dl.acm.org/citation.cfm?id=648118.746742},
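The additive homomorphism that the voting application exploits is already visible in plain Paillier, the base case of the generalisation above. The sketch below uses toy primes (wholly insecure parameters, chosen for readability) and multiplies ciphertexts to add the underlying votes:

    import math
    import random

    p, q = 1009, 1013                # toy primes; real keys are thousands of bits
    n = p * q
    n2 = n * n
    lam = math.lcm(p - 1, q - 1)
    g = n + 1
    mu = pow(lam, -1, n)             # since L(g^lam mod n^2) = lam when g = n+1

    def enc(m):
        r = random.randrange(1, n)   # assume gcd(r, n) == 1 (overwhelmingly likely)
        return pow(g, m, n2) * pow(r, n, n2) % n2

    def dec(c):
        L = (pow(c, lam, n2) - 1) // n   # the function L(u) = (u - 1) / n
        return L * mu % n

    votes = [1, 0, 1, 1, 0]          # yes/no ballots
    tally_ct = 1
    for v in votes:
        tally_ct = tally_ct * enc(v) % n2   # multiplying ciphertexts adds plaintexts
    assert dec(tally_ct) == sum(votes)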
@@ -14341,8 +14328,8 @@ Our construction strictly improves upon previous constructions and resolves some
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK},
- abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data
-storage, data sharing, group communication and naming. Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
+ abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data
+storage, data sharing, group communication and naming. Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry{\textquoteright}s scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties},
keywords = {overlay networks, P2P},
isbn = {3-540-42800-3},
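The delivery rule in the abstract, route to the live node whose nodeId is numerically closest to the key, reduces to a one-liner once the candidate set is known. The sketch below uses a 16-bit toy identifier space and deliberately ignores the routing table, leaf set, and proximity metric:

    # nodeIds and keys live in a circular 128-bit space in Pastry;
    # a 16-bit toy space is used here for readability.
    SPACE = 1 << 16

    def numeric_distance(a, b):
        # Distance on the circular identifier space.
        d = abs(a - b)
        return min(d, SPACE - d)

    def responsible_node(key, live_nodes):
        # Pastry delivers the message to the live node whose nodeId
        # is numerically closest to the key.
        return min(live_nodes, key=lambda n: numeric_distance(n, key))

    nodes = [0x1A2B, 0x8F00, 0xC350, 0x1234]
    assert responsible_node(0x1300, nodes) == 0x1234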
@@ -14361,9 +14348,9 @@ Pastry is completely decentralized, scalable, and self-organizing; it automatica
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {Heidelberg, Germany},
- abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data storage, data sharing, group communication and naming.
-Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
-Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry{\textquoteright}s scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties.
+ abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data storage, data sharing, group communication and naming.
+Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
+Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry{\textquoteright}s scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties.
Work done in part while visiting Microsoft Research, Cambridge, UK},
keywords = {distributed hash table, Pastry},
isbn = {3-540-42800-3},
@@ -14379,11 +14366,11 @@ Work done in part while visiting Microsoft Research, Cambridge, UK},
publisher = {O{\textquoteright}Reilly \& Associates, Inc},
organization = {O{\textquoteright}Reilly \& Associates, Inc},
address = {Sebastopol, CA, USA},
- abstract = {Upstart software projects Napster, Gnutella, and Freenet have dominated newspaper headlines, challenging traditional approaches to content distribution with their revolutionary use of peer-to-peer file-sharing technologies. Reporters try to sort out the ramifications of seemingly ungoverned peer-to-peer networks. Lawyers, business leaders, and social commentators debate the virtues and evils of these bold new distributed systems. But what{\textquoteright}s really behind such disruptive technologies -- the breakthrough innovations that have rocked the music and media worlds? And what lies ahead?
-In this book, key peer-to-peer pioneers take us beyond the headlines and hype and show how the technology is changing the way we communicate and exchange information. Those working to advance peer-to-peer as a technology, a business opportunity, and an investment offer their insights into how the technology has evolved and where it{\textquoteright}s going. They explore the problems they{\textquoteright}ve faced, the solutions they{\textquoteright}ve discovered, the lessons they{\textquoteright}ve learned, and their goals for the future of computer networking.
-
-Until now, Internet communities have been limited by the flat interactive qualities of email and network newsgroups, where people can exchange recommendations and ideas but have great difficulty commenting on one another{\textquoteright}s postings, structuring information, performing searches, and creating summaries. Peer-to-peer challenges the traditional authority of the client/server model, allowing shared information to reside instead with producers and users. Peer-to-peer networks empower users to collaborate on producing and consuming information, adding to it, commenting on it, and building communities around it.
-
+ abstract = {Upstart software projects Napster, Gnutella, and Freenet have dominated newspaper headlines, challenging traditional approaches to content distribution with their revolutionary use of peer-to-peer file-sharing technologies. Reporters try to sort out the ramifications of seemingly ungoverned peer-to-peer networks. Lawyers, business leaders, and social commentators debate the virtues and evils of these bold new distributed systems. But what{\textquoteright}s really behind such disruptive technologies -- the breakthrough innovations that have rocked the music and media worlds? And what lies ahead?
+In this book, key peer-to-peer pioneers take us beyond the headlines and hype and show how the technology is changing the way we communicate and exchange information. Those working to advance peer-to-peer as a technology, a business opportunity, and an investment offer their insights into how the technology has evolved and where it{\textquoteright}s going. They explore the problems they{\textquoteright}ve faced, the solutions they{\textquoteright}ve discovered, the lessons they{\textquoteright}ve learned, and their goals for the future of computer networking.
+
+Until now, Internet communities have been limited by the flat interactive qualities of email and network newsgroups, where people can exchange recommendations and ideas but have great difficulty commenting on one another{\textquoteright}s postings, structuring information, performing searches, and creating summaries. Peer-to-peer challenges the traditional authority of the client/server model, allowing shared information to reside instead with producers and users. Peer-to-peer networks empower users to collaborate on producing and consuming information, adding to it, commenting on it, and building communities around it.
+
This compilation represents the collected wisdom of today{\textquoteright}s peer-to-peer luminaries. It includes contributions from Gnutella{\textquoteright}s Gene Kan, Freenet{\textquoteright}s Brandon Wiley, Jabber{\textquoteright}s Jeremie Miller, and many others -- plus serious discussions of topics ranging from accountability and trust to security and performance. Fraught with questions and promise, peer-to-peer is sure to remain on the computer industry{\textquoteright}s center stage for years to come},
isbn = {059600110X},
www_section = {http://portal.acm.org/citation.cfm?id=558412$\#$},
@@ -14603,8 +14590,8 @@ This compilation represents the collected wisdom of today{\textquoteright}s peer
publisher = {Princeton University Press},
organization = {Princeton University Press},
address = {Princeton, New Jersey, USA},
- abstract = {Economics has much to do with incentives--not least, incentives to work hard, to produce quality products, to study, to invest, and to save. Although Adam Smith amply confirmed this more than two hundred years ago in his analysis of sharecropping contracts, only in recent decades has a theory begun to emerge to place the topic at the heart of economic thinking. In this book, Jean-Jacques Laffont and David Martimort present the most thorough yet accessible introduction to incentives theory to date. Central to this theory is a simple question as pivotal to modern-day management as it is to economics research: What makes people act in a particular way in an economic or business situation? In seeking an answer, the authors provide the methodological tools to design institutions that can ensure good incentives for economic agents.
-
+ abstract = {Economics has much to do with incentives--not least, incentives to work hard, to produce quality products, to study, to invest, and to save. Although Adam Smith amply confirmed this more than two hundred years ago in his analysis of sharecropping contracts, only in recent decades has a theory begun to emerge to place the topic at the heart of economic thinking. In this book, Jean-Jacques Laffont and David Martimort present the most thorough yet accessible introduction to incentives theory to date. Central to this theory is a simple question as pivotal to modern-day management as it is to economics research: What makes people act in a particular way in an economic or business situation? In seeking an answer, the authors provide the methodological tools to design institutions that can ensure good incentives for economic agents.
+
This book focuses on the principal-agent model, the "simple" situation where a principal, or company, delegates a task to a single agent through a contract--the essence of management and contract theory. How does the owner or manager of a firm align the objectives of its various members to maximize profits? Following a brief historical overview showing how the problem of incentives has come to the fore in the past two centuries, the authors devote the bulk of their work to exploring principal-agent models and various extensions thereof in light of three types of information problems: adverse selection, moral hazard, and non-verifiability. Offering an unprecedented look at a subject vital to industrial organization, labor economics, and behavioral economics, this book is set to become the definitive resource for students, researchers, and others who might find themselves pondering what contracts, and the incentives they embody, are really all about},
keywords = {economics, principal-agent model},
isbn = {9780691091846},
@@ -14881,7 +14868,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p
year = {2000},
publisher = {Springer-Verlag, LNCS 1803},
organization = {Springer-Verlag, LNCS 1803},
- abstract = {A MIX net takes a list of ciphertexts (c_1, ..., c_N) and outputs a permuted list of the plaintexts (m_1, ..., m_N) without revealing the relationship between (c_1, ..., c_N) and (m_1, ..., m_N). This paper first shows that Jakobsson{\textquoteright}s MIX net of Eurocrypt{\textquoteright}98, which was believed to be resilient and very efficient, is broken. We next propose an efficient t-resilient MIX net with O(t^2) servers in which the cost of each MIX server is O(N). Two new concepts are introduced, existential-honesty and limited-open-verification. They will be useful for distributed computation in general.
+ abstract = {A MIX net takes a list of ciphertexts (c_1, ..., c_N) and outputs a permuted list of the plaintexts (m_1, ..., m_N) without revealing the relationship between (c_1, ..., c_N) and (m_1, ..., m_N). This paper first shows that Jakobsson{\textquoteright}s MIX net of Eurocrypt{\textquoteright}98, which was believed to be resilient and very efficient, is broken. We next propose an efficient t-resilient MIX net with O(t^2) servers in which the cost of each MIX server is O(N). Two new concepts are introduced, existential-honesty and limited-open-verification. They will be useful for distributed computation in general.
A part of this research was done while the author visited the Tokyo Institute of Technology, March 4--19, 1999. He was then at the University of Wisconsin {\textemdash} Milwaukee},
keywords = {existential-honesty, limited-open-verification, mix},
isbn = {978-3-540-67517-4},
@@ -14943,10 +14930,10 @@ A part of this research was done while the author visited the Tokyo Institute of
publisher = {USENIX Association},
organization = {USENIX Association},
address = {San Diego, California, USA},
- abstract = {Overcast is an application-level multicasting system that can be incrementally deployed using today{\textquoteright}s Internet infrastructure. These properties stem from Overcast{\textquoteright}s implementation as an overlay network. An overlay network consists of a collection of nodes placed at strategic locations in an existing network fabric. These nodes implement a network abstraction on top of the network provided by the underlying substrate network.
-
-Overcast provides scalable and reliable single-source multicast using a simple protocol for building efficient data distribution trees that adapt to changing network conditions. To support fast joins, Overcast implements a new protocol for efficiently tracking the global status of a changing distribution tree.
-
+ abstract = {Overcast is an application-level multicasting system that can be incrementally deployed using today{\textquoteright}s Internet infrastructure. These properties stem from Overcast{\textquoteright}s implementation as an overlay network. An overlay network consists of a collection of nodes placed at strategic locations in an existing network fabric. These nodes implement a network abstraction on top of the network provided by the underlying substrate network.
+
+Overcast provides scalable and reliable single-source multicast using a simple protocol for building efficient data distribution trees that adapt to changing network conditions. To support fast joins, Overcast implements a new protocol for efficiently tracking the global status of a changing distribution tree.
+
Results based on simulations confirm that Overcast provides its added functionality while performing competitively with IP Multicast. Simulations indicate that Overcast quickly builds bandwidth-efficient distribution trees that, compared to IP Multicast, provide 70\%-100\% of the total bandwidth possible, at a cost of somewhat less than twice the network load. In addition, Overcast adapts quickly to changes caused by the addition of new nodes or the failure of existing nodes without causing undue load on the multicast source},
keywords = {overcast, overlay network},
www_section = {http://dl.acm.org/citation.cfm?id=1251229.1251243},
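Overcast's tree-building heuristic, descend from the root toward a child only while bandwidth through that child remains about as good as through the current position, might be sketched as follows (the Node class, the probe function, and the 10% tolerance are invented for the illustration, not the paper's protocol):

    class Node:
        def __init__(self, name):
            self.name = name
            self.children = []
            self.parent = None

    def join(root, new_node, bandwidth):
        # Descend while some child offers roughly the same measured
        # bandwidth, yielding deep, bandwidth-efficient trees.
        current = root
        while True:
            candidates = [c for c in current.children
                          if bandwidth(c, new_node) >= 0.9 * bandwidth(current, new_node)]
            if not candidates:
                break
            current = max(candidates, key=lambda c: bandwidth(c, new_node))
        current.children.append(new_node)
        new_node.parent = current

    root, a = Node("root"), Node("a")
    root.children.append(a)
    a.parent = root
    bw = lambda x, y: {"root": 1.0, "a": 0.95}[x.name]   # stand-in probe
    n = Node("n")
    join(root, n, bw)
    assert n.parent is a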
@@ -15195,8 +15182,8 @@ Results based on simulations confirm that Overcast provides its added functional
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {Trier, Germany},
- abstract = {This paper considers algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. Such scenarios arise, in particular, when computers or users aim to cooperate or trade over the Internet. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents{\textquoteright} interests are best served by behaving correctly.
-
+ abstract = {This paper considers algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. Such scenarios arise, in particular, when computers or users aim to cooperate or trade over the Internet. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents{\textquoteright} interests are best served by behaving correctly.
+
This exposition presents a model to formally study such algorithms. This model, based on the field of mechanism design, is taken from the author{\textquoteright}s joint work with Amir Ronen, and is similar to approaches taken in the distributed AI community in recent years. Using this model, we demonstrate how some of the techniques of mechanism design can be applied towards distributed computation problems. We then exhibit some issues that arise in distributed computation which require going beyond the existing theory of mechanism design},
keywords = {algorithms, mechanism design, selfish agent},
isbn = {3-540-65691-X},
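A concrete instance of the incentive problem studied in this exposition is the textbook Vickrey (second-price) auction, under which truthful reporting is a dominant strategy; this is the canonical example of a truthful mechanism, not a construction from the paper:

    def vickrey(bids):
        # bids: {agent: reported valuation}. The winner pays the second-highest
        # bid, so no agent can gain by misreporting its valuation.
        ranked = sorted(bids, key=bids.get, reverse=True)
        winner, runner_up = ranked[0], ranked[1]
        return winner, bids[runner_up]

    winner, price = vickrey({"a": 10, "b": 7, "c": 3})
    assert (winner, price) == ("a", 7)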
@@ -15617,7 +15604,7 @@ This exposition presents a model to formally study such algorithms. This model,
volume = {5},
year = {1997},
pages = {784--803},
- abstract = {This paper describes SRM (Scalable Reliable Multicast), a reliable multicast framework for light-weight sessions and application level framing. The algorithms of this framework are efficient, robust, and scale well to both very large networks and very large sessions. The SRM framework has been prototyped in wb, a distributed whiteboard application, which has been used on a global scale with sessions ranging from a few to a few hundred participants. The paper describes the principles that have guided the SRM design, including the IP multicast group delivery model, an end-to-end, receiver-based model of reliability, and the application level framing protocol model. As with unicast communications, the performance of a reliable multicast delivery algorithm depends on the underlying topology and operational environment. We investigate that dependence via analysis and simulation, and demonstrate an adaptive algorithm that uses the results of previous loss recovery events to adapt the control parameters used
+ abstract = {This paper describes SRM (Scalable Reliable Multicast), a reliable multicast framework for light-weight sessions and application level framing. The algorithms of this framework are efficient, robust, and scale well to both very large networks and very large sessions. The SRM framework has been prototyped in wb, a distributed whiteboard application, which has been used on a global scale with sessions ranging from a few to a few hundred participants. The paper describes the principles that have guided the SRM design, including the IP multicast group delivery model, an end-to-end, receiver-based model of reliability, and the application level framing protocol model. As with unicast communications, the performance of a reliable multicast delivery algorithm depends on the underlying topology and operational environment. We investigate that dependence via analysis and simulation, and demonstrate an adaptive algorithm that uses the results of previous loss recovery events to adapt the control parameters used
for future loss recovery. With the adaptive algorithm, our reliable multicast delivery algorithm provides good performance over a wide range of underlying topologies},
keywords = {computer network performance, computer networks, Internetworking},
issn = {1063-6692},
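SRM's receiver-based recovery hinges on randomized, distance-scaled request timers that suppress duplicate repair requests. A schematic version follows; the constants c1 and c2 echo the paper's timer parameters, but the event handling is a stand-in simulation rather than the protocol itself:

    import random

    def schedule_request(distance_to_source, c1=1.0, c2=2.0):
        # A receiver that detects a loss waits a random time drawn from
        # [c1*d, (c1+c2)*d], where d estimates its distance to the source,
        # so nearer receivers usually ask first.
        return random.uniform(c1 * distance_to_source,
                              (c1 + c2) * distance_to_source)

    def recover(receivers):
        # receivers: {name: distance}. The earliest timer fires and multicasts
        # a repair request; everyone else hears it and suppresses their own.
        timers = {r: schedule_request(d) for r, d in receivers.items()}
        requester = min(timers, key=timers.get)
        suppressed = [r for r in timers if r != requester]
        return requester, suppressed

    who, quiet = recover({"r1": 0.2, "r2": 1.0, "r3": 1.5})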
@@ -15853,8 +15840,8 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {Network Appliance Corporation recently began shipping a new kind of network server called an NFS file server appliance, which is a dedicated server whose sole function is to provide NFS file service. The file system requirements for an NFS appliance are different from those for a general-purpose UNIX system, both because an NFS appliance must be optimized for network file access and because an appliance must be easy to use.
-
+ abstract = {Network Appliance Corporation recently began shipping a new kind of network server called an NFS file server appliance, which is a dedicated server whose sole function is to provide NFS file service. The file system requirements for an NFS appliance are different from those for a general-purpose UNIX system, both because an NFS appliance must be optimized for network file access and because an appliance must be easy to use.
+
This paper describes WAFL (Write Anywhere File Layout), which is a file system designed specifically to work in an NFS appliance. The primary focus is on the algorithms and data structures that WAFL uses to implement Snapshots, which are read-only clones of the active file system. WAFL uses a copy-on-write technique to minimize the disk space that Snapshots consume. This paper also describes how WAFL uses Snapshots to eliminate the need for file system consistency checking after an unclean shutdown},
www_section = {http://portal.acm.org/citation.cfm?id=1267093$\#$},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.40.3691.pdf},
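The copy-on-write mechanics behind Snapshots can be shown with a toy block map, where a snapshot is a frozen copy of the map rather than of the data (the structure is invented for illustration; WAFL's on-disk layout is far richer):

    class CowVolume:
        def __init__(self):
            self.blocks = {}    # physical block number -> data
            self.active = {}    # file block index -> physical block number
            self.next_pbn = 0

        def write(self, index, data):
            # Never overwrite in place: allocate a fresh physical block and
            # repoint only the active map, so snapshots keep the old block.
            self.blocks[self.next_pbn] = data
            self.active[index] = self.next_pbn
            self.next_pbn += 1

        def snapshot(self):
            # A snapshot is a read-only copy of the block map, not of the data.
            return dict(self.active)

    vol = CowVolume()
    vol.write(0, b"v1")
    snap = vol.snapshot()
    vol.write(0, b"v2")
    assert vol.blocks[snap[0]] == b"v1"          # the snapshot still sees v1
    assert vol.blocks[vol.active[0]] == b"v2"    # the active file system sees v2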
@@ -15930,8 +15917,8 @@ This paper describes WAFL (Write Anywhere File Layout), which is a file system d
pages = {0--144},
publisher = {Springer},
organization = {Springer},
- abstract = {Elliptic curves have been intensively studied in algebraic geometry and number theory. In recent years they have been used in devising efficient algorithms for factoring integers and primality proving, and in the construction of public key cryptosystems.
-Elliptic Curve Public Key Cryptosystems provides an up-to-date and self-contained treatment of elliptic curve-based public key cryptology. Elliptic curve cryptosystems potentially provide equivalent security to the existing public key schemes, but with shorter key lengths. Having short key lengths means smaller bandwidth and memory requirements and can be a crucial factor in some applications, for example the design of smart card systems. The book examines various issues which arise in the secure and efficient implementation of elliptic curve systems.
+ abstract = {Elliptic curves have been intensively studied in algebraic geometry and number theory. In recent years they have been used in devising efficient algorithms for factoring integers and primality proving, and in the construction of public key cryptosystems.
+Elliptic Curve Public Key Cryptosystems provides an up-to-date and self-contained treatment of elliptic curve-based public key cryptology. Elliptic curve cryptosystems potentially provide equivalent security to the existing public key schemes, but with shorter key lengths. Having short key lengths means smaller bandwidth and memory requirements and can be a crucial factor in some applications, for example the design of smart card systems. The book examines various issues which arise in the secure and efficient implementation of elliptic curve systems.
Elliptic Curve Public Key Cryptosystems is a valuable reference resource for researchers in academia, government and industry who are concerned with issues of data security. Because of the comprehensive treatment, the book is also suitable for use as a text for advanced courses on the subject},
keywords = {algebraic geometry, elliptic curve cryptography, number theory, public key cryptosystem},
isbn = {978-0-7923-9368-9},
@@ -16028,11 +16015,11 @@ Elliptic Curve Public Key Cryptosystems is a valuable reference resource for res
publisher = {Springer-Verlag New York, Inc},
organization = {Springer-Verlag New York, Inc},
address = {Houthalen, Belgium},
- abstract = {In Journal of Cryptology 1/1 (1988) 65-75 (= [Chau_88]), David Chaum describes a beautiful technique, the DC-net, which should allow participants to send and receive messages anonymously in an arbitrary network. The untraceability of the senders is proved to be unconditional, but that of the recipients implicitly
-assumes a reliable broadcast network. This assumption is unrealistic in some networks, but it can be removed completely by using the fail-stop key generation schemes by Waidner (these proceedings, = [Waid_89]). In both cases, however, each participant can untraceably and permanently disrupt the entire DC-net.
-We present a protocol which guarantees unconditional untraceability, the original goal of the DC-net, on the inseparability assumption (i.e. the attacker must be unable to prevent honest participants from communicating, which is considerably less than reliable broadcast), and computationally secure serviceability: Computationally restricted disrupters can be identified and removed from the DC-net.
-On the one hand, our solution is based on the lovely idea by David Chaum [Chau_88 {\textsection} 2.5] of setting traps for disrupters. He suggests a scheme to guarantee unconditional untraceability and computationally secure serviceability, too, but on the reliable broadcast assumption. The same scheme seems to be used by Bos and den Boer (these proceedings, = [BoBo_89]). We show that this scheme needs some changes and refinements before being secure, even on the reliable broadcast assumption.
-On the other hand, our solution is based on the idea of digital signatures whose forgery by an unexpectedly powerful attacker is provable, which might be of independent interest. We propose such a (one-time) signature scheme based on claw-free permutation pairs; the forgery of signatures is equivalent to finding claws, thus in a special case to the factoring problem. In particular, with such signatures we can, for the first time, realize fail-stop Byzantine Agreement, and also adaptive Byzantine Agreement, i.e. Byzantine Agreement which can only be disrupted by an attacker who controls at least a third of all participants and who can forge signatures.
+ abstract = {In Journal of Cryptology 1/1 (1988) 65-75 (= [Chau_88]), David Chaum describes a beautiful technique, the DC-net, which should allow participants to send and receive messages anonymously in an arbitrary network. The untraceability of the senders is proved to be unconditional, but that of the recipients implicitly
+assumes a reliable broadcast network. This assumption is unrealistic in some networks, but it can be removed completely by using the fail-stop key generation schemes by Waidner (these proceedings, = [Waid_89]). In both cases, however, each participant can untraceably and permanently disrupt the entire DC-net.
+We present a protocol which guarantees unconditional untraceability, the original goal of the DC-net, on the inseparability assumption (i.e. the attacker must be unable to prevent honest participants from communicating, which is considerably less than reliable broadcast), and computationally secure serviceability: Computationally restricted disrupters can be identified and removed from the DC-net.
+On the one hand, our solution is based on the lovely idea by David Chaum [Chau_88 {\textsection} 2.5] of setting traps for disrupters. He suggests a scheme to guarantee unconditional untraceability and computationally secure serviceability, too, but on the reliable broadcast assumption. The same scheme seems to be used by Bos and den Boer (these proceedings, = [BoBo_89]). We show that this scheme needs some changes and refinements before being secure, even on the reliable broadcast assumption.
+On the other hand, our solution is based on the idea of digital signatures whose forgery by an unexpectedly powerful attacker is provable, which might be of independent interest. We propose such a (one-time) signature scheme based on claw-free permutation pairs; the forgery of signatures is equivalent to finding claws, thus in a special case to the factoring problem. In particular, with such signatures we can, for the first time, realize fail-stop Byzantine Agreement, and also adaptive Byzantine Agreement, i.e. Byzantine Agreement which can only be disrupted by an attacker who controls at least a third of all participants and who can forge signatures.
We also sketch applications of these signatures to a payment system, solving disputes about shared secrets, and signatures which cannot be shown round},
keywords = {anonymity, arbitrary network, cryptology, DC-net},
isbn = {3-540-53433-4},
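The underlying DC-net round that the paper hardens is itself tiny: every pair of participants shares a key, each participant broadcasts the XOR of its keys (the sender also XORs in its message), and the XOR of all broadcasts yields the message without identifying the sender. A minimal honest-case round in Python, with none of the paper's trap or fail-stop machinery:

    import os
    from itertools import combinations

    def dc_net_round(n_participants, sender, message):
        # One shared key per unordered pair of participants.
        keys = {pair: int.from_bytes(os.urandom(8), "big")
                for pair in combinations(range(n_participants), 2)}
        announcements = []
        for p in range(n_participants):
            share = 0
            for pair, k in keys.items():
                if p in pair:
                    share ^= k       # each key cancels between its two holders
            if p == sender:
                share ^= message     # the sender folds its message in
            announcements.append(share)
        combined = 0
        for a in announcements:
            combined ^= a
        return combined              # equals message; the sender stays hidden

    assert dc_net_round(5, sender=2, message=0xC0FFEE) == 0xC0FFEE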