commit 5004e72e09bb0439ccf5f922e26d60c829269bcb
parent 5514cf1a012d4e10955c20d83f85f9e1cb73db5a
Author: ng0 <ng0@n0.is>
Date: Tue, 23 Jan 2018 11:26:24 +0000
more parens
Diffstat:
 gnunetbib.bib | 51 +++++++++++++++++++++++----------------------------
 1 file changed, 23 insertions(+), 28 deletions(-)
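The change is mechanical throughout: abstract fields in gnunetbib.bib open with a double brace ({{), but a number of entries closed them with only a single brace before the trailing comma, leaving the entry unbalanced. A minimal sketch of the pattern this commit applies, shown on a hypothetical entry (field text is illustrative, not taken from the file):

Before (unbalanced, one closing brace short):

    abstract = {{Some abstract text spanning several lines...
},
    keywords = {example},

After (balanced; the field still ends with a comma because more fields follow):

    abstract = {{Some abstract text spanning several lines...
}},
    keywords = {example},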
diff --git a/gnunetbib.bib b/gnunetbib.bib
@@ -5725,7 +5725,7 @@ Today with the advent of Peer-to-Peer technology, distributed file systems that
The System Architecture Group at the University of Karlsruhe in Germany has developed such a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information.
One problem with this approach is that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way.
Usually the strategy to solve this type of problem is an encrypted multicast. This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al.
-},
+}},
keywords = {distributed file system, distributed hash table, peer-to-peer networking, store information},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf},
author = {Bernhard Amann}
@@ -5889,7 +5889,7 @@ Usually the strategy to solve this type of problem is an encrypted multicast. Th
abstract = {{This paper investigates the problem of designing anonymity networks that meet application-specific performance and security constraints. We argue that existing anonymity networks take a narrow view of performance by considering only the strength of the offered anonymity. However, real-world applications impose a myriad of communication requirements, including end-to-end bandwidth and latency, trustworthiness of intermediary routers, and network jitter.
We pose a grand challenge for anonymity: the development of a network architecture that enables applications to customize routes that trade off between anonymity and performance. Towards this challenge, we present the Application-Aware Anonymity (A3) routing service. We envision that A3 will serve as a powerful and flexible anonymous communications layer that will spur the future development of anonymity services.
-},
+}},
keywords = {anonymity, routing},
url = {http://portal.acm.org/citation.cfm?id=1361423},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/a3.pdf},
@@ -6652,7 +6652,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
abstract = {{Location-hidden services, as offered by anonymity systems such as Tor, allow servers to be operated under a pseudonym. As Tor is an overlay network, servers hosting hidden services are accessible both directly and over the anonymous channel. Traffic patterns through one channel have observable effects on the other, thus allowing a service{\textquoteright}s pseudonymous identity and IP address to be linked. One proposed solution to this vulnerability is for Tor nodes to provide fixed quality of service to each connection, regardless of other traffic, thus reducing capacity but resisting such interference attacks. However, even if each connection does not influence the others, total throughput would still affect the load on the CPU, and thus its heat output. Unfortunately for anonymity, the result of temperature on clock skew can be remotely detected through observing timestamps. This attack works because existing abstract models of anonymity-network nodes do not take into account the inevitable imperfections of the hardware they run on. Furthermore, we suggest the same technique could be exploited as a classical covert channel and can even provide geolocation.
-},
+}},
keywords = {anonymity, clock skew, covert channels, fingerprinting, Tor},
isbn = {1-59593-518-5},
doi = {10.1145/1180405.1180410},
@@ -6669,7 +6669,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar
organization = {ACM Press},
address = {New York, NY, USA},
abstract = {{We create a credential system that lets a user anonymously authenticate at most $n$ times in a single time period. A user withdraws a dispenser of n e-tokens. She shows an e-token to a verifier to authenticate herself; each e-token can be used only once, however, the dispenser automatically refreshes every time period. The only prior solution to this problem, due to Damg{\r a}rd et al. [29], uses protocols that are a factor of k slower for the user and verifier, where k is the security parameter. Damg{\r a}rd et al. also only support one authentication per time period, while we support n. Because our construction is based on e-cash, we can use existing techniques to identify a cheating user, trace all of her e-tokens, and revoke her dispensers. We also offer a new anonymity service: glitch protection for basically honest users who (occasionally) reuse e-tokens. The verifier can always recognize a reused e-token; however, we preserve the anonymity of users who do not reuse e-tokens too often.
-},
+}},
keywords = {clone detection, credentials, n-anonymous authentication},
isbn = {1-59593-518-5},
doi = {10.1145/1180405.1180431},
@@ -6703,7 +6703,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar
organization = {Springer},
address = {Cambridge, UK},
abstract = {{The so-called {\textquotedblleft}Great Firewall of China{\textquotedblright} operates, in part, by inspecting TCP packets for keywords that are to be blocked. If the keyword is present, TCP reset packets (viz: with the RST flag set) are sent to both endpoints of the connection, which then close. However, because the original packets are passed through the firewall unscathed, if the endpoints completely ignore the firewall{\textquoteright}s resets, then the connection will proceed unhindered. Once one connection has been blocked, the firewall makes further easy-to-evade attempts to block further connections from the same machine. This latter behaviour can be leveraged into a denial-of-service attack on third-party machines.
-},
+}},
isbn = {978-3-540-68790-0},
doi = {10.1007/11957454},
url = {http://www.springerlink.com/content/7224582654260k03/},
@@ -6743,7 +6743,7 @@ The algorithms have been implemented in a middleware called the Distributed k-ar
publisher = {ACM},
organization = {ACM},
address = {Ann Arbor, Michigan, USA},
- abstract = {{In this paper we argue that a robust incentive mechanism is important in a real-world peer-to-peer streaming system to ensure that nodes contribute as much upload bandwidth as they can. We show that simple tit-for-tat mechanisms which work well in file-sharing systems like BitTorrent do not perform well given the additional delay and bandwidth constraints imposed by live streaming. We present preliminary experimental results for an incentive mechanism based on the Iterated Prisoner{\textquoteright}s Dilemma problem that allows all nodes to download with low packet loss when there is sufficient capacity in the system, but when the system is resource-starved, nodes that contribute upload bandwidth receive better service than those that do not. Moreover, our algorithm does not require nodes to rely on any information other than direct observations of its neighbors {\textquoteright} behavior towards it. },
+ abstract = {{In this paper we argue that a robust incentive mechanism is important in a real-world peer-to-peer streaming system to ensure that nodes contribute as much upload bandwidth as they can. We show that simple tit-for-tat mechanisms which work well in file-sharing systems like BitTorrent do not perform well given the additional delay and bandwidth constraints imposed by live streaming. We present preliminary experimental results for an incentive mechanism based on the Iterated Prisoner{\textquoteright}s Dilemma problem that allows all nodes to download with low packet loss when there is sufficient capacity in the system, but when the system is resource-starved, nodes that contribute upload bandwidth receive better service than those that do not. Moreover, our algorithm does not require nodes to rely on any information other than direct observations of its neighbors {\textquoteright} behavior towards it. }},
keywords = {peer-to-peer streaming, tit-for-tat},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/NetEcon\%2706\%20-\%20Improving\%20robustness\%20of\%20p2p\%20streaming.pdf},
author = {Vinay Pai and Alexander E. Mohr}
@@ -6952,7 +6952,7 @@ The author has managed to put the currency system into practical use since June
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
abstract = {{Many applications of mix networks such as anonymous Web browsing require relationship anonymity: it should be hard for the attacker to determine who is communicating with whom. Conventional methods for measuring anonymity, however, focus on sender anonymity instead. Sender anonymity guarantees that it is difficult for the attacker to determine the origin of any given message exiting the mix network, but this may not be sufficient to ensure relationship anonymity. Even if the attacker cannot identify the origin of messages arriving to some destination, relationship anonymity will fail if he can determine with high probability that at least one of the messages originated from a particular sender, without necessarily being able to recognize this message among others. We give a formal definition and a calculation methodology for relationship anonymity. Our techniques are similar to those used for sender anonymity, but, unlike sender anonymity, relationship anonymity is sensitive to the distribution of message destinations. In particular, Zipfian distributions with skew values characteristic of Web browsing provide especially poor relationship anonymity. Our methodology takes route selection algorithms into account, and incorporates information-theoretic metrics such as entropy and min-entropy. We illustrate our methodology by calculating relationship anonymity in several simulated mix networks.
-},
+}},
keywords = {anonymity, privacy},
isbn = {1-59593-556-8},
doi = {10.1145/1179601.1179611},
@@ -7261,7 +7261,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased
publisher = {IEEE Press},
address = {Piscataway, NJ, USA},
abstract = {{LT-codes are a new class of codes introduced by Luby for the purpose of scalable and fault-tolerant distribution of data over computer networks. In this paper, we introduce Raptor codes, an extension of LT-codes with linear time encoding and decoding. We will exhibit a class of universal Raptor codes: for a given integer k and any real ε > 0, Raptor codes in this class produce a potentially infinite stream of symbols such that any subset of symbols of size k(1 + ε) is sufficient to recover the original k symbols with high probability. Each output symbol is generated using O(log(1/ε)) operations, and the original symbols are recovered from the collected ones with O(k log(1/ε)) operations. We will also introduce novel techniques for the analysis of the error probability of the decoder for finite length Raptor codes. Moreover, we will introduce and analyze systematic versions of Raptor codes, i.e., versions in which the first output elements of the coding system coincide with the original k elements.
-},
+}},
keywords = {802.11, encoding, erasure coding},
issn = {1063-6692},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/raptor.pdf},
@@ -7335,7 +7335,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased
booktitle = {Sensor and Ad Hoc Communications and Networks, 2006. SECON {\textquoteright}06. 2006 3rd Annual IEEE Communications Society on },
year = {2006},
month = {09/2006},
- abstract = {{Routing in wireless networks is inherently difficult since their network topologies are typically unstructured and unstable. Therefore, many routing protocols for ad-hoc networks and sensor networks revert to flooding to acquire routes to previously unknown destinations. However, such an approach does not scale to large networks, especially when nodes need to communicate with many different destinations. This paper advocates a novel approach, the scalable source routing (SSR) protocol. It combines overlay-like routing in a virtual network structure with source routing in the physical network structure. As a consequence, SSR can efficiently provide the routing semantics of a structured routing overlay, making it an efficient basis for the scalable implementation of fully decentralized applications. In T. Fuhrmann (2005) it has been demonstrated that SSR can almost entirely avoid flooding, thus leading to a both memory and message efficient routing mechanism for large unstructured networks. This paper extends SSR to unstable networks, i. e. networks with churn where nodes frequently join and leave, the latter potentially ungracefully},
+ abstract = {{Routing in wireless networks is inherently difficult since their network topologies are typically unstructured and unstable. Therefore, many routing protocols for ad-hoc networks and sensor networks revert to flooding to acquire routes to previously unknown destinations. However, such an approach does not scale to large networks, especially when nodes need to communicate with many different destinations. This paper advocates a novel approach, the scalable source routing (SSR) protocol. It combines overlay-like routing in a virtual network structure with source routing in the physical network structure. As a consequence, SSR can efficiently provide the routing semantics of a structured routing overlay, making it an efficient basis for the scalable implementation of fully decentralized applications. In T. Fuhrmann (2005) it has been demonstrated that SSR can almost entirely avoid flooding, thus leading to a both memory and message efficient routing mechanism for large unstructured networks. This paper extends SSR to unstable networks, i. e. networks with churn where nodes frequently join and leave, the latter potentially ungracefully}},
keywords = {ad-hoc networks, scalable source routing},
isbn = {1-4244-0626-9 },
doi = {10.1109/SAHCN.2006.288406 },
@@ -7350,7 +7350,7 @@ two shallow circuits: one for generating many arbitrarily but identically biased
abstract = {{Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to share their {\textquotedblleft}private{\textquotedblright} information due to fear of adverse impact of information leakage.
This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demand signal (e.g., promotions) and cost information to one another. In our collaborative CPFR scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailer{\textquoteright}s and the supplier{\textquoteright}s observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for
collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the other{\textquoteright}s future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation.
-},
+}},
keywords = {chain computation management, CPFR, privacy, secure multi-party computation, secure supply, security, SMC},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf},
author = {Atallah, Mikhail and Marina Blanton and Vinayak Deshpand and Frikken, Keith and Li, Jiangtao and Leroy Schwarz}
@@ -7584,7 +7584,7 @@ This result immediately implies solutions to other long-standing open problems s
organization = {Springer},
address = {Cambridge, UK},
abstract = {{Location hidden services have received increasing attention as a means to resist censorship and protect the identity of service operators. Research and vulnerability analysis to date has mainly focused on how to locate the hidden service. But while the hiding techniques have improved, almost no progress has been made in increasing the resistance against DoS attacks directly or indirectly on hidden services. In this paper we suggest improvements that should be easy to adopt within the existing hidden service design, improvements that will both reduce vulnerability to DoS attacks and add QoS as a service option. In addition we show how to hide not just the location but the existence of the hidden service from everyone but the users knowing its service address. Not even the public directory servers will know how a private hidden service can be contacted, or know it exists.
-},
+}},
keywords = {censorship resistance, information hiding},
isbn = {978-3-540-68790-0},
doi = {10.1007/11957454},
@@ -7602,7 +7602,7 @@ This result immediately implies solutions to other long-standing open problems s
pages = {241{\textendash}255},
publisher = {Springer},
abstract = {{A shuffle takes a list of ciphertexts and outputs a permuted list of re-encryptions of the input ciphertexts. Mix-nets, a popular method for anonymous routing, can be constructed from a sequence of shuffles and decryption. We propose a formal model for security of verifiable shuffles and a new verifiable shuffle system based on the Paillier encryption scheme, and prove its security in the proposed model. The model is general and can be extended to provide provable security for verifiable shuffle decryption.
-},
+}},
keywords = {formal security model, paillier public-key system, privacy, verifiable shuffles},
issn = {1615-5262},
doi = {10.1007/s10207-006-0004-8},
@@ -7682,7 +7682,7 @@ We show that applying encoding based on universal re-encryption can solve many o
organization = {ACM},
address = {New York, NY, USA},
abstract = {{This paper evaluates the ability of a wireless mesh architecture to provide high performance Internet access while demanding little deployment planning or operational management. The architecture considered in this paper has unplanned node placement (rather than planned topology), omni-directional antennas (rather than directional links), and multi-hop routing (rather than single-hop base stations). These design decisions contribute to ease of deployment, an important requirement for community wireless networks. However, this architecture carries the risk that lack of planning might render the network{\textquoteright}s performance unusably low. For example, it might be necessary to place nodes carefully to ensure connectivity; the omni-directional antennas might provide uselessly short radio ranges; or the inefficiency of multi-hop forwarding might leave some users effectively disconnected. The paper evaluates this unplanned mesh architecture with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37 nodes spread over four square kilometers of an urban area. The network provides users with usable performance despite lack of planning: the average inter-node throughput is 627 kbits/second, even though the average route has three hops. The paper evaluates multiple aspects of the architecture: the effect of node density on connectivity and throughput; the characteristics of the links that the routing protocol elects to use; the usefulness of the highly connected mesh afforded by omni-directional antennas for robustness and throughput; and the potential performance of a single-hop network using the same nodes as Roofnet.
-},
+}},
keywords = {ad-hoc networks, mesh networks, multi-hop networks, route metrics, wireless routing},
isbn = {1-59593-020-5},
doi = {10.1145/1080829.1080833},
@@ -9283,7 +9283,7 @@ In this paper we present a novel routing approach that is capable of handling co
type = {publication},
address = {Sydney, Australia},
abstract = {{In this paper, we briefly present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in networks with {\textquoteright}random topology{\textquoteright}. This algorithm enables sensor networks to use recent peer-to-peer mechanisms from the field of overlay networks, like e.g. distributed hash tables and indirection infrastructures. Unlike other proposals along that direction, SSR integrates all necessary routing tasks into one simple, highly efficient routing protocol. Simulations demonstrate that in a small-world network with more than 100 000 nodes, SSR requires each node to only store routing data for 255 other nodes to establish routes between arbitrary pairs of nodes. These routes are on average only about 20-30\% longer than the globally optimal path between these nodes.
-},
+}},
keywords = {scalable source routing, topology matching},
isbn = {0-7803-9246-9},
url = {http://i30www.ira.uka.de/research/publications/p2p/},
@@ -10507,7 +10507,7 @@ In this paper we improve these results: we show that the same level of unlinkabi
organization = {USENIX Association},
address = {Berkeley, CA, USA},
abstract = {{Ongoing advancements in technology lead to ever-increasing storage capacities. In spite of this, optimizing storage usage can still provide rich dividends. Several techniques based on delta-encoding and duplicate block suppression have been shown to reduce storage overheads, with varying requirements for resources such as computation and memory. We propose a new scheme for storage reduction that reduces data sizes with an effectiveness comparable to the more expensive techniques, but at a cost comparable to the faster but less effective ones. The scheme, called Redundancy Elimination at the Block Level (REBL), leverages the benefits of compression, duplicate block suppression, and delta-encoding to eliminate a broad spectrum of redundant data in a scalable and efficient manner. REBL generally encodes more compactly than compression (up to a factor of 14) and a combination of compression and duplicate suppression (up to a factor of 6.7). REBL also encodes similarly to a technique based on delta-encoding, reducing overall space significantly in one case. Furthermore, REBL uses super-fingerprints, a technique that reduces the data needed to identify similar blocks while dramatically reducing the computational requirements of matching the blocks: it turns $O(n^2)$ comparisons into hash table lookups. As a result, using super-fingerprints to avoid enumerating matching data objects decreases computation in the resemblance detection phase of REBL by up to a couple orders of magnitude.
-},
+}},
url = {http://portal.acm.org/citation.cfm?id=1247420$\#$},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.91.8331.pdf},
author = {Kulkarni, Purushottam and Douglis, Fred and Jason Lavoie and Tracey, John M.}}
@@ -10658,7 +10658,7 @@ This paper describes the design and implementation of a secure, reliable, and sc
abstract = {{Developing sensor network applications demands a new set of tools to aid programmers. A number of simulation environments have been developed that provide varying degrees of scalability, realism, and detail for understanding the behavior of sensor networks. To date, however, none of these tools have addressed one of the most important aspects of sensor application design: that of power consumption. While simple approximations of overall power usage can be derived from estimates of node duty cycle and communication rates, these techniques often fail to capture the detailed, low-level energy requirements of the CPU, radio, sensors, and other peripherals.
In this paper, we present PowerTOSSIM, a scalable simulation environment for wireless sensor networks that provides an accurate, per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an event-driven simulation environment for TinyOS applications. In PowerTOSSIM, TinyOS components corresponding to specific hardware peripherals (such as the radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each device{\textquoteright}s activity during the simulation run. PowerTOSSIM employs a novel code-transformation technique to estimate the number of CPU cycles executed by each node, eliminating the need for expensive instruction-level simulation of sensor nodes. PowerTOSSIM includes a detailed model of hardware energy consumption based on the Mica2 sensor node platform. Through instrumentation of actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation of power consumption for a range of applications and scales to support very large simulations.
-},
+}},
keywords = {sensor networks, TinyOS},
doi = {10.1145/1031495.1031518},
url = {http://portal.acm.org/citation.cfm?id=1031495.1031518},
@@ -11287,7 +11287,7 @@ participants in this context. The approach presented does not use credentials o
GNUnet aims to provide anonymity for its users. Its design
makes it hard to link a transaction to the node where it originated from. While anonymity requirements make a global view of the end-points of a transaction infeasible, the local link-to-link messages can be fully authenticated. Our economic model is based entirely on this local view of the network and takes only local
decisions.
-},
+}},
keywords = {anonymity, file-sharing, GNUnet},
url = {http://grothoff.org/christian/ebe.pdf},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/ebe.pdf},
@@ -11505,7 +11505,7 @@ In this paper, we propose a new P2P routing algorithm - - HIERAS to relieve this
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
address = {Washington D.C., USA},
- abstract = {{Informally, a communication protocol is sender k-anonymous if it can guarantee that an adversary, trying to determine the sender of a particular message, can only narrow down its search to a set of k suspects. Receiver k-anonymity places a similar guarantee on the receiver: an adversary, at best, can only narrow down the possible receivers to a set of size k. In this paper we introduce the notions of sender and receiver k-anonymity and consider their applications. We show that there exist simple and efficient protocols which are k-anonymous for both the sender and the receiver in a model where a polynomial time adversary can see all traffic in the network and can control up to a constant fraction of the participants. Our protocol is provably secure, practical, and does not require the existence of trusted third parties. This paper also provides a conceptually simple augmentation to Chaum{\textquoteright}s DC-Nets that adds robustness against adversaries who attempt to disrupt the protocol through perpetual transmission or selective non-participation},
+ abstract = {{Informally, a communication protocol is sender k-anonymous if it can guarantee that an adversary, trying to determine the sender of a particular message, can only narrow down its search to a set of k suspects. Receiver k-anonymity places a similar guarantee on the receiver: an adversary, at best, can only narrow down the possible receivers to a set of size k. In this paper we introduce the notions of sender and receiver k-anonymity and consider their applications. We show that there exist simple and efficient protocols which are k-anonymous for both the sender and the receiver in a model where a polynomial time adversary can see all traffic in the network and can control up to a constant fraction of the participants. Our protocol is provably secure, practical, and does not require the existence of trusted third parties. This paper also provides a conceptually simple augmentation to Chaum{\textquoteright}s DC-Nets that adds robustness against adversaries who attempt to disrupt the protocol through perpetual transmission or selective non-participation}},
isbn = {1-58113-738-9},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.9348\&rep=rep1\&type=url\&i=2},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/k-anonymous_ccs2003.pdf},
@@ -11579,8 +11579,7 @@ In this paper, we propose a new P2P routing algorithm - - HIERAS to relieve this
publisher = {ACM},
organization = {ACM},
address = {New York, NY, USA},
- abstract = {{Napster pioneered the idea of peer-to-peer file sharing, and supported it with a centralized file search facility. Subsequent P2P systems like Gnutella adopted decentralized search algorithms. However, Gnutella{\textquoteright}s notoriously poor scaling led some to propose distributed hash table solutions to the wide-area file search problem. Contrary to that trend, we advocate retaining Gnutella{\textquoteright}s simplicity while proposing new mechanisms that greatly improve its scalability. Building upon prior research [1, 12, 22], we propose several modifications to Gnutella{\textquoteright}s design that dynamically adapt the overlay topology and the search algorithms in order to accommodate the natural heterogeneity present in most peer-to-peer systems. We test our design through simulations and the results show three to five orders of magnitude improvement in total system capacity. We also report on a prototype implementation and its deployment on a testbed.
-},
+ abstract = {{Napster pioneered the idea of peer-to-peer file sharing, and supported it with a centralized file search facility. Subsequent P2P systems like Gnutella adopted decentralized search algorithms. However, Gnutella{\textquoteright}s notoriously poor scaling led some to propose distributed hash table solutions to the wide-area file search problem. Contrary to that trend, we advocate retaining Gnutella{\textquoteright}s simplicity while proposing new mechanisms that greatly improve its scalability. Building upon prior research [1, 12, 22], we propose several modifications to Gnutella{\textquoteright}s design that dynamically adapt the overlay topology and the search algorithms in order to accommodate the natural heterogeneity present in most peer-to-peer systems. We test our design through simulations and the results show three to five orders of magnitude improvement in total system capacity. We also report on a prototype implementation and its deployment on a testbed.}},
keywords = {distributed hash table, Gnutella, P2P},
isbn = {1-58113-735-4},
doi = {10.1145/863955.864000},
@@ -11596,8 +11595,7 @@ In this paper, we propose a new P2P routing algorithm - - HIERAS to relieve this
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {{Versioning file systems retain earlier versions of modified files, allowing recovery from user mistakes or system corruption. Unfortunately, conventional versioning systems do not efficiently record large numbers of versions. In particular, versioned metadata can consume as much space as versioned data. This paper examines two space-efficient metadata structures for versioning file systems and describes their integration into the Comprehensive Versioning File System (CVFS), which keeps all versions of all files. Journal-based metadata encodes each metadata version into a single journal entry; CVFS uses this structure for inodes and indirect blocks, reducing the associated space requirements by 80\%. Multiversion b-trees extend each entry{\textquoteright}s key with a timestamp and keep current and historical entries in a single tree; CVFS uses this structure for directories, reducing the associated space requirements by 99\%. Similar space reductions are predicted via trace analysis for other versioning strategies (e.g., on-close versioning). Experiments with CVFS verify that its current-version performance is similar to that of non-versioning file systems while reducing overall space needed for history data by a factor of two. Although access to historical versions is slower than conventional versioning systems, checkpointing is shown to mitigate and bound this effect.
-},
+ abstract = {{Versioning file systems retain earlier versions of modified files, allowing recovery from user mistakes or system corruption. Unfortunately, conventional versioning systems do not efficiently record large numbers of versions. In particular, versioned metadata can consume as much space as versioned data. This paper examines two space-efficient metadata structures for versioning file systems and describes their integration into the Comprehensive Versioning File System (CVFS), which keeps all versions of all files. Journal-based metadata encodes each metadata version into a single journal entry; CVFS uses this structure for inodes and indirect blocks, reducing the associated space requirements by 80\%. Multiversion b-trees extend each entry{\textquoteright}s key with a timestamp and keep current and historical entries in a single tree; CVFS uses this structure for directories, reducing the associated space requirements by 99\%. Similar space reductions are predicted via trace analysis for other versioning strategies (e.g., on-close versioning). Experiments with CVFS verify that its current-version performance is similar to that of non-versioning file systems while reducing overall space needed for history data by a factor of two. Although access to historical versions is slower than conventional versioning systems, checkpointing is shown to mitigate and bound this effect.}},
keywords = {file systems},
url = {http://portal.acm.org/citation.cfm?id=1090694.1090700$\#$},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/fast03.pdf},
@@ -11611,8 +11609,7 @@ In this paper, we propose a new P2P routing algorithm - - HIERAS to relieve this
pages = {48{\textendash}65},
publisher = {Springer-Verlag, LNCS 2760},
organization = {Springer-Verlag, LNCS 2760},
- abstract = {{This paper considers systems for Traffic Analysis Prevention (TAP) in a theoretical model. It considers TAP based on padding and rerouting of messages and describes the effects each has on the difference between the actual and the observed traffic matrix (TM). The paper introduces an entropy-based approach to the amount of uncertainty a global passive adversary has in determining the actual TM, or alternatively, the probability that the actual TM has a property of interest. Unlike previous work, the focus is on determining the overall amount of anonymity a TAP system can provide, or the amount it can provide for a given cost in padding and rerouting, rather than on the amount of protection afforded particular communications.
-},
+ abstract = {{This paper considers systems for Traffic Analysis Prevention (TAP) in a theoretical model. It considers TAP based on padding and rerouting of messages and describes the effects each has on the difference between the actual and the observed traffic matrix (TM). The paper introduces an entropy-based approach to the amount of uncertainty a global passive adversary has in determining the actual TM, or alternatively, the probability that the actual TM has a property of interest. Unlike previous work, the focus is on determining the overall amount of anonymity a TAP system can provide, or the amount it can provide for a given cost in padding and rerouting, rather than on the amount of protection afforded particular communications.}},
keywords = {traffic analysis, traffic matrix},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/steinbrecher-pet2003_0.pdf},
author = {Richard E. Newman and Ira S. Moskowitz and Paul Syverson and Andrei Serjantov},
@@ -13819,8 +13816,7 @@ We further investigate this intriguing proposal. Specifically, we
month = {04/2003},
publisher = {Springer-Verlag, LNCS 2482},
organization = {Springer-Verlag, LNCS 2482},
- abstract = {{In this paper we look closely at the popular metric of anonymity, the anonymity set, and point out a number of problems associated with it. We then propose an alternative information theoretic measure of anonymity which takes into account the probabilities of users sending and receiving the messages and show how to calculate it for a message in a standard mix-based anonymity system. We also use our metric to compare a pool mix to a traditional threshold mix, which was impossible using anonymity sets. We also show how the maximum route length restriction which exists in some fielded anonymity systems can lead to the attacker performing more powerful traffic analysis. Finally, we discuss open problems and future work on anonymity measurements.
-}},
+ abstract = {{In this paper we look closely at the popular metric of anonymity, the anonymity set, and point out a number of problems associated with it. We then propose an alternative information theoretic measure of anonymity which takes into account the probabilities of users sending and receiving the messages and show how to calculate it for a message in a standard mix-based anonymity system. We also use our metric to compare a pool mix to a traditional threshold mix, which was impossible using anonymity sets. We also show how the maximum route length restriction which exists in some fielded anonymity systems can lead to the attacker performing more powerful traffic analysis. Finally, we discuss open problems and future work on anonymity measurements.}},
keywords = {anonymity measurement, traffic analysis},
isbn = {978-3-540-00565-0},
doi = {10.1007/3-540-36467-6},
@@ -13845,8 +13841,7 @@ We further investigate this intriguing proposal. Specifically, we
month = {04/2003},
publisher = {Springer-Verlag, LNCS 2482},
organization = {Springer-Verlag, LNCS 2482},
- abstract = {{This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other.
-}},
+ abstract = {{This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other.}},
keywords = {anonymity, attack, privacy},
isbn = {978-3-540-00565-0},
doi = {10.1007/3-540-36467-6},