commit 412898a76d84455d432638eabd9c31b382addeaf
parent d1281299e1fdb834b0f2c9fb8f2df023893e10db
Author: ng0 <ng0@n0.is>
Date:   Fri, 5 Jan 2018 19:17:44 +0000

braces yourselves

Diffstat:
 1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/gnunetbib.bib b/gnunetbib.bib
@@ -15577,7 +15577,7 @@ This exposition presents a model to formally study such algorithms. This model,
pages = {440-444},
publisher = { Springer Berlin / Heidelberg},
organization = { Springer Berlin / Heidelberg},
- abstract = {{This paper describes a zero-knowledge proof that a mix in onion routing can perform in order to proof that it did route the messages properly. This allows the deployment of a mix-net where malicious mixes can be detected without using dummy-traffic to probe for correctness. Technical.},
+ abstract = {{This paper describes a zero-knowledge proof that a mix in onion routing can perform in order to proof that it did route the messages properly. This allows the deployment of a mix-net where malicious mixes can be detected without using dummy-traffic to probe for correctness. Technical.}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.357\&rep=rep1\&type=url\&i=0},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/fault.dvi_.pdf},
author = {Wakaha Ogata and Kaoru Kurosawa and Kazue Sako and Kazunori Takatani}
@@ -15592,7 +15592,7 @@ This exposition presents a model to formally study such algorithms. This model,
publisher = {ACM},
organization = {ACM},
address = {El Paso, Texas, USA},
- abstract = {{We present a randomized construction of linear-time encodable and decodable codes that can transmit over lossy channels at rates extremely close to capacity. The encoding and decoding algorithms for these codes have fast and simple software implementations. Partial implementations of our algorithms are faster by orders of magnitude than the best software implementations of any previous algorithm for this problem. We expect these codes will be extremely useful for applications such as real-time audio and video transmission over the Internet, where lossy channels are common and fast decoding is a requirement. Despite the simplicity of the algorithms, their design and analysis are mathematically intricate. The design requires the careful choice of a random irregular bipartite graph, where the structure of the irregular graph is extremely important. We model the progress of the decoding algorithm by a set of differential equations. The solution to these equations can then be expressed as polynomials in one variable with coefficients determined by the graph structure. Based on these polynomials, we design a graph structure that guarantees successful decoding with high probability.},
+ abstract = {{We present a randomized construction of linear-time encodable and decodable codes that can transmit over lossy channels at rates extremely close to capacity. The encoding and decoding algorithms for these codes have fast and simple software implementations. Partial implementations of our algorithms are faster by orders of magnitude than the best software implementations of any previous algorithm for this problem. We expect these codes will be extremely useful for applications such as real-time audio and video transmission over the Internet, where lossy channels are common and fast decoding is a requirement. Despite the simplicity of the algorithms, their design and analysis are mathematically intricate. The design requires the careful choice of a random irregular bipartite graph, where the structure of the irregular graph is extremely important. We model the progress of the decoding algorithm by a set of differential equations. The solution to these equations can then be expressed as polynomials in one variable with coefficients determined by the graph structure. Based on these polynomials, we design a graph structure that guarantees successful decoding with high probability.}},
keywords = {loss-resilient code},
isbn = {0-89791-888-6},
doi = {http://doi.acm.org/10.1145/258533.258573},
@@ -15608,7 +15608,7 @@ This exposition presents a model to formally study such algorithms. This model,
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
address = {San Jose, CA, United States},
- abstract = {{The increased use of the Internet for everyday activities is bringing new threats to personal privacy. This paper gives an overview of existing and potential privacy-enhancing technologies for the Internet, as well as motivation and challenges for future work in this field.},
+ abstract = {{The increased use of the Internet for everyday activities is bringing new threats to personal privacy. This paper gives an overview of existing and potential privacy-enhancing technologies for the Internet, as well as motivation and challenges for future work in this field.}},
keywords = {Internet, privacy, privacy-enhancing technology},
isbn = {0818678046},
url = {http://www.cs.berkeley.edu/~daw/papers/privacy-compcon97-www/privacy-html.html},
@@ -15623,7 +15623,7 @@ This exposition presents a model to formally study such algorithms. This model,
pages = {784{\textendash}803},
abstract = {{This paper describes SRM (Scalable Reliable Multicast), a reliable multicast framework for light-weight sessions and application level framing. The algorithms of this framework are efficient, robust, and scale well to both very large networks and very large sessions. The SRM framework has been prototyped in wb, a distributed whiteboard application, which has been used on a global scale with sessions ranging from a few to a few hundred participants. The paper describes the principles that have guided the SRM design, including the IP multicast group delivery model, an end-to-end, receiver-based model of reliability, and the application level framing protocol model. As with unicast communications, the performance of a reliable multicast delivery algorithm depends on the underlying topology and operational environment. We investigate that dependence via analysis and simulation, and demonstrate an adaptive algorithm that uses the results of previous loss recovery events to adapt the control parameters used
for future loss recovery. With the adaptive algorithm, our reliable multicast delivery algorithm provides good performance over a wide range of underlying topologies.},
- keywords = {computer network performance, computer networks, Internetworking},
+ keywords = {computer network performance, computer networks, Internetworking}},
issn = {1063-6692},
doi = {10.1109/90.650139},
url = {http://dx.doi.org/10.1109/90.650139},
@@ -15637,7 +15637,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
number = {4},
year = {1997},
month = {August},
- abstract = {{The World Wide Web has recently matured enough to provide everyday users with an extremely cheap publishing mechanism. However, the current WWW architecture makes it fundamentally difficult to provide content without identifying yourself. We examine the problem of anonymous publication on the WWW, propose a design suitable for practical deployment, and describe our implementation. Some key features of our design include universal accessibility by pre-existing clients, short persistent names, security against social, legal, and political pressure, protection against abuse, and good performance.},
+ abstract = {{The World Wide Web has recently matured enough to provide everyday users with an extremely cheap publishing mechanism. However, the current WWW architecture makes it fundamentally difficult to provide content without identifying yourself. We examine the problem of anonymous publication on the WWW, propose a design suitable for practical deployment, and describe our implementation. Some key features of our design include universal accessibility by pre-existing clients, short persistent names, security against social, legal, and political pressure, protection against abuse, and good performance.}},
keywords = {anonymous publishing},
doi = {10.1.1.41.4031},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.4031},
@@ -15652,7 +15652,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK},
- abstract = {{Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed.},
+ abstract = {{Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed.}},
isbn = {3-540-61964-X},
doi = {10.1007/BFb0023076},
url = {http://www.springerlink.com/content/584k258285p18x4g/},
@@ -15666,7 +15666,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK},
- abstract = {{Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed.},
+ abstract = {{Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed.}},
isbn = {3-540-61964-X},
doi = {10.1007/BFb0023076},
url = {http://www.springerlink.com/content/584k258285p18x4g/},
@@ -15680,7 +15680,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
publisher = {USENIX Association},
organization = {USENIX Association},
address = {Berkeley, CA, USA},
- abstract = {{this paper is that a traditional identity certificate is neither necessary nor sufficient for this purpose. It is especially useless if the two parties concerned did not have the foresight to obtain such certificates before desiring to open a secure channel. There are many methods for establishing identity without using certificates from trusted certification authorities. The relationship between verifier and subject guides the choice of method. Many of these relationships have easy, straight-forward methods for binding a public key to an identity, using a broadcast channel or 1:1 meetings, but one relationship makes it especially difficult. That relationship is one with an old friend with whom you had lost touch but who appears now to be available on the net. You make contact and share a few exchanges which suggest to you that this is, indeed, your old friend. Then you want to form a secure channel in order to carry on a more extensive conversation in private. This case is subject to the man-in-themiddle attack. For this case, a protocol is presented which binds a pair of identities to a pair of public keys without using any certificates issued by a trusted CA. The apparent direct conflict between conventional wisdom and the thesis of this paper lies in the definition of the word "identity" -- a word which is commonly left undefined in discussions of certification.},
+ abstract = {{this paper is that a traditional identity certificate is neither necessary nor sufficient for this purpose. It is especially useless if the two parties concerned did not have the foresight to obtain such certificates before desiring to open a secure channel. There are many methods for establishing identity without using certificates from trusted certification authorities. The relationship between verifier and subject guides the choice of method. Many of these relationships have easy, straight-forward methods for binding a public key to an identity, using a broadcast channel or 1:1 meetings, but one relationship makes it especially difficult. That relationship is one with an old friend with whom you had lost touch but who appears now to be available on the net. You make contact and share a few exchanges which suggest to you that this is, indeed, your old friend. Then you want to form a secure channel in order to carry on a more extensive conversation in private. This case is subject to the man-in-themiddle attack. For this case, a protocol is presented which binds a pair of identities to a pair of public keys without using any certificates issued by a trusted CA. The apparent direct conflict between conventional wisdom and the thesis of this paper lies in the definition of the word "identity" -- a word which is commonly left undefined in discussions of certification.}},
keywords = {certificate revocation, public key cryptography},
url = {http://portal.acm.org/citation.cfm?id=1267576$\#$},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.31.7263.pdf},
@@ -15693,7 +15693,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
month = {09/1996},
pages = {242{\textendash}252},
address = {Prague, CZ},
- abstract = {{The Internet was designed to provide a communications channel that is as resistant to denial of service attacks as human ingenuity can make it. In this note, we propose the construction of a storage medium with similar properties. The basic idea is to use redundancy and scattering techniques to replicate data across a large set of machines (such as the Internet), and add anonymity mechanisms to drive up the cost of selective service denial attacks. The detailed design of this service is an interesting scientific problem, and is not merely academic: the service may be vital in safeguarding individual rights against new threats posed by the spread of electronic publishing},
+ abstract = {{The Internet was designed to provide a communications channel that is as resistant to denial of service attacks as human ingenuity can make it. In this note, we propose the construction of a storage medium with similar properties. The basic idea is to use redundancy and scattering techniques to replicate data across a large set of machines (such as the Internet), and add anonymity mechanisms to drive up the cost of selective service denial attacks. The detailed design of this service is an interesting scientific problem, and is not merely academic: the service may be vital in safeguarding individual rights against new threats posed by the spread of electronic publishing}},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.1952\&rep=rep1\&type=pdf},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/eternity.pdf},
author = {Ross Anderson}
@@ -15707,7 +15707,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
publisher = {Springer-Verlag, LNCS 1174},
organization = {Springer-Verlag, LNCS 1174},
abstract = {{Abstract. This paper describes an architecture, Onion Routing, that limits a network{\textquoteright}s vulnerability to trac analysis. The architecture provides anonymous socket connections by means of proxy servers. It provides real-time, bi-directional, nonymous communication for any protocol that can be adapted to use a proxy service. Specically, the architecture provides for bi-directional communication even though no-one but the initiator{\textquoteright}s proxy server knows anything but previous and next hops
-in the communication chain. This implies that neither the respondent nor his proxy server nor any external observer need know the identity of the initiator or his proxy server. A prototype of Onion Routing has been implemented. This prototype works with HTTP (World Wide Web) proxies. In addition, an analogous proxy for TELNET has been implemented. Proxies for FTP and SMTP are under development},
+in the communication chain. This implies that neither the respondent nor his proxy server nor any external observer need know the identity of the initiator or his proxy server. A prototype of Onion Routing has been implemented. This prototype works with HTTP (World Wide Web) proxies. In addition, an analogous proxy for TELNET has been implemented. Proxies for FTP and SMTP are under development}},
keywords = {communication chain, onion routing, traffic analysis},
isbn = {3-540-61996-8},
url = {http://portal.acm.org/citation.cfm?id=731526},
@@ -15725,7 +15725,7 @@ in the communication chain. This implies that neither the respondent nor his pro
publisher = {AAAI Press},
organization = {AAAI Press},
address = {Portland, OR, United States},
- abstract = {{Constraint satisfaction is a powerful tool for representing and solving decision problems with complete knowledge about the world. We extend the CSP framework so as to represent decision problems under incomplete knowledge. The basis of the extension consists in a distinction between controllable and uncontrollable variables -- hence the terminology "mixed CSP" -- and a "solution" gives actually a conditional decision. We study the complexity of deciding the consistency of a mixed CSP. As the problem is generally intractable, we propose an algorithm for finding an approximate solution.},
+ abstract = {{Constraint satisfaction is a powerful tool for representing and solving decision problems with complete knowledge about the world. We extend the CSP framework so as to represent decision problems under incomplete knowledge. The basis of the extension consists in a distinction between controllable and uncontrollable variables -- hence the terminology "mixed CSP" -- and a "solution" gives actually a conditional decision. We study the complexity of deciding the consistency of a mixed CSP. As the problem is generally intractable, we propose an algorithm for finding an approximate solution.}},
keywords = {algorithms, constraint satisfaction, decision problem, framework, imcomplete knowledge, mixed CSP},
isbn = {0-262-51091-X},
url = {http://dl.acm.org/citation.cfm?id=1892875.1892901},
@@ -15737,7 +15737,7 @@ in the communication chain. This implies that neither the respondent nor his pro
booktitle = {Symposium on Network and Distributed System Security},
year = {1996},
pages = {2{\textendash}16},
- abstract = {{Increasingly large numbers of people communicate today via electronic means such as email or news forums. One of the basic properties of the current electronic communication means is the identification of the end-points. However, at times it is desirable or even critical to hide the identity and/or whereabouts of the end-points (e.g., human users) involved. This paper discusses the goals and desired properties of anonymous email in general and introduces the design and salient features of Babel anonymous remailer. Babel allows email users to converse electronically while remaining anonymous with respect to each other and to other-- even hostile--parties. A range of attacks and corresponding countermeasures is considered. An attempt is made to formalize and quantify certain dimensions of anonymity and untraceable communication.},
+ abstract = {{Increasingly large numbers of people communicate today via electronic means such as email or news forums. One of the basic properties of the current electronic communication means is the identification of the end-points. However, at times it is desirable or even critical to hide the identity and/or whereabouts of the end-points (e.g., human users) involved. This paper discusses the goals and desired properties of anonymous email in general and introduces the design and salient features of Babel anonymous remailer. Babel allows email users to converse electronically while remaining anonymous with respect to each other and to other-- even hostile--parties. A range of attacks and corresponding countermeasures is considered. An attempt is made to formalize and quantify certain dimensions of anonymity and untraceable communication.}},
url = {http://eprints.kfupm.edu.sa/50994/1/50994.pdf},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/babel.pdf},
author = {Ceki Gulcu and Gene Tsudik}
@@ -15749,7 +15749,7 @@ in the communication chain. This implies that neither the respondent nor his pro
number = {2},
year = {1996},
month = {August},
- abstract = {{Remailers have permitted Internet users to take advantage of the medium as a means to communicate with others globally on sensitive issues while maintaining a high degree of privacy. Recent events have clearly indicated that privacy is increasingly at risk on the global networks. Individual efforts have, so far, worked well in maintaining for most Internet users a modicum of anonymity. With the growth of increasingly sophisticated techniques to defeat anonymity, there will be a need for both standards and policies to continue to make privacy on the Internet a priority.},
+ abstract = {{Remailers have permitted Internet users to take advantage of the medium as a means to communicate with others globally on sensitive issues while maintaining a high degree of privacy. Recent events have clearly indicated that privacy is increasingly at risk on the global networks. Individual efforts have, so far, worked well in maintaining for most Internet users a modicum of anonymity. With the growth of increasingly sophisticated techniques to defeat anonymity, there will be a need for both standards and policies to continue to make privacy on the Internet a priority.}},
url = {http://131.193.153.231/www/issues/issue2/remailers/index.html},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Prospects\%20for\%20Remailers.pdf},
author = {Sameer Parekh}
@@ -15757,7 +15757,7 @@ in the communication chain. This implies that neither the respondent nor his pro
@booklet {Stemm96reducingpower,
title = {Reducing Power Consumption of Network Interfaces in Hand-Held Devices (Extended Abstract)},
year = {1996},
- abstract = {{An important issue to be addressed for the next generation of wirelessly-connected hand-held devices is battery longevity. In this paper we examine this issue from the point of view of the Network Interface (NI). In particular, we measure the power usage of two PDAs, the Apple Newton Messagepad and Sony Magic Link, and four NIs, the Metricom Ricochet Wireless Modem, the AT\&T Wavelan operating at 915 MHz and 2.4 GHz, and the IBM Infrared Wireless LAN Adapter. These measurements clearly indicate that the power drained by the network interface constitutes a large fraction of the total power used by the PDA. We also conduct trace-driven simulation experiments and show that by using applicationspecific policies it is possible to ...},
+ abstract = {{An important issue to be addressed for the next generation of wirelessly-connected hand-held devices is battery longevity. In this paper we examine this issue from the point of view of the Network Interface (NI). In particular, we measure the power usage of two PDAs, the Apple Newton Messagepad and Sony Magic Link, and four NIs, the Metricom Ricochet Wireless Modem, the AT\&T Wavelan operating at 915 MHz and 2.4 GHz, and the IBM Infrared Wireless LAN Adapter. These measurements clearly indicate that the power drained by the network interface constitutes a large fraction of the total power used by the PDA. We also conduct trace-driven simulation experiments and show that by using applicationspecific policies it is possible to ...}},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.8384},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.39.8384.pdf},
author = {Mark Stemm and Paul Gauthier and Daishi Harada and Katz, Randy H.}
@@ -15770,7 +15770,7 @@ in the communication chain. This implies that neither the respondent nor his pro
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
address = {London, UK},
- abstract = {{This paper is a first step towards an understanding of the inherent limitations of distributed data structures. We propose a model of distributed search trees that is based on few natural assumptions. We prove that any class of trees within our model satisfies a lower bound of \Omega\Gamma p m) on the worst case height of distributed search trees for m keys. That is, unlike in the single site case, balance in the sense that the tree height satisfies a logarithmic upper bound cannot be achieved. This is true although each node is allowed to have arbitrary degree (note that in this case, the height of a single site search tree is trivially bounded by one). By proposing a method that generates trees of height O( p m), we show the bound to be tight. 1 Introduction Distributed data structures have attracted considerable attention in the past few years. From a practical viewpoint, this is due to the increasing availability of networks of workstations.},
+ abstract = {{This paper is a first step towards an understanding of the inherent limitations of distributed data structures. We propose a model of distributed search trees that is based on few natural assumptions. We prove that any class of trees within our model satisfies a lower bound of \Omega\Gamma p m) on the worst case height of distributed search trees for m keys. That is, unlike in the single site case, balance in the sense that the tree height satisfies a logarithmic upper bound cannot be achieved. This is true although each node is allowed to have arbitrary degree (note that in this case, the height of a single site search tree is trivially bounded by one). By proposing a method that generates trees of height O( p m), we show the bound to be tight. 1 Introduction Distributed data structures have attracted considerable attention in the past few years. From a practical viewpoint, this is due to the increasing availability of networks of workstations.}},
isbn = {3-540-60220-8},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4081},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.34.4081.pdf},
@@ -15795,7 +15795,7 @@ in the communication chain. This implies that neither the respondent nor his pro
title = {The final frontier: Embedding networked sensors in the soil},
year = {1995},
publisher = {Lecture Notes in Computer Science},
- abstract = {{This paper presents the first systematic design of a robust sensing system suited for the challenges presented by soil environments. We describe three soil deployments we have undertaken: in Bangladesh, and in California at the James Reserve and in the San Joaquin River basin. We discuss our experiences and lessons learned in deploying soil sensors. We present data from each deployment and evaluate our techniques for improving the information yield from these systems. Our most notable results include the following: in-situ calibration techniques to postpone labor-intensive and soil disruptive calibration events developed at the James Reserve; achieving a 91 \% network yield from a Mica2 wireless sensing system without end-to-end reliability in Bangladesh; and the javelin, a new platform that facilitates the deployment, replacement and in-situ calibration of soil sensors, deployed in the San Joaquin River basin. Our techniques to increase information yield have already led to scientifically promising results, including previously unexpected diurnal cycles in various soil chemistry parameters across several deployments. },
+ abstract = {{This paper presents the first systematic design of a robust sensing system suited for the challenges presented by soil environments. We describe three soil deployments we have undertaken: in Bangladesh, and in California at the James Reserve and in the San Joaquin River basin. We discuss our experiences and lessons learned in deploying soil sensors. We present data from each deployment and evaluate our techniques for improving the information yield from these systems. Our most notable results include the following: in-situ calibration techniques to postpone labor-intensive and soil disruptive calibration events developed at the James Reserve; achieving a 91 \% network yield from a Mica2 wireless sensing system without end-to-end reliability in Bangladesh; and the javelin, a new platform that facilitates the deployment, replacement and in-situ calibration of soil sensors, deployed in the San Joaquin River basin. Our techniques to increase information yield have already led to scientifically promising results, including previously unexpected diurnal cycles in various soil chemistry parameters across several deployments. }},
keywords = {sensor networks, wireless sensor network},
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.7766},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.120.7766.pdf},
@@ -15808,7 +15808,7 @@ in the communication chain. This implies that neither the respondent nor his pro
month = {05/1995},
publisher = {IEEE Computer Society},
organization = {IEEE Computer Society},
- abstract = {{Even as wireless networks create the potential for access to information from mobile platforms, they pose a problem for privacy. In order to retrieve messages, users must periodically poll the network. The information that the user must give to the network could potentially be used to track that user. However, the movements of the user can also be used to hide the user{\textquoteright}s location if the protocols for sending and retrieving messages are carefully designed. We have developed a replicated memory service which allows users to read from memory without revealing which memory locations they are reading. Unlike previous protocols, our protocol is efficient in its use of computation and bandwidth. We show how this protocol can be used in conjunction with existing privacy preserving protocols to allow a user of a mobile computer to maintain privacy despite active attacks.},
+ abstract = {{Even as wireless networks create the potential for access to information from mobile platforms, they pose a problem for privacy. In order to retrieve messages, users must periodically poll the network. The information that the user must give to the network could potentially be used to track that user. However, the movements of the user can also be used to hide the user{\textquoteright}s location if the protocols for sending and retrieving messages are carefully designed. We have developed a replicated memory service which allows users to read from memory without revealing which memory locations they are reading. Unlike previous protocols, our protocol is efficient in its use of computation and bandwidth. We show how this protocol can be used in conjunction with existing privacy preserving protocols to allow a user of a mobile computer to maintain privacy despite active attacks.}},
url = {http://portal.acm.org/citation.cfm?id=882491.884247},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/cooper.pdf},
author = {David A. Cooper and Kenneth P. Birman}
@@ -15820,9 +15820,7 @@ in the communication chain. This implies that neither the respondent nor his pro
pages = {41{\textendash}50},
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
- abstract = {{Publicly accessible databases are an indispensable resource for retrieving up-to-date information. But they also pose a significant risk to the privacy of the user, since a curious database operator can follow the user{\textquoteright}s queries and infer what the user is after. Indeed, in cases where the users{\textquoteright} intentions are to be kept secret, users are often cautious about accessing the database. It can be shown that when accessing a single database, to completely guarantee the privacy of the user, the whole database should be down-loaded; namely n bits should be communicated (where n is the number of bits in the database).In this work, we investigate whether by replicating the database, more efficient solutions to the private retrieval problem can be obtained. We describe schemes that enable a user to access k replicated copies of a database (k>=2) and privately retrieve information stored in the database. This means that each individual server (holding a replicated copy of the database) gets no information on the identity of the item retrieved by the user. Our schemes use the replication to gain substantial saving. In particular, we present a two-server scheme with communication complexity O(n1/3).
-
-},
+ abstract = {{Publicly accessible databases are an indispensable resource for retrieving up-to-date information. But they also pose a significant risk to the privacy of the user, since a curious database operator can follow the user{\textquoteright}s queries and infer what the user is after. Indeed, in cases where the users{\textquoteright} intentions are to be kept secret, users are often cautious about accessing the database. It can be shown that when accessing a single database, to completely guarantee the privacy of the user, the whole database should be down-loaded; namely n bits should be communicated (where n is the number of bits in the database).In this work, we investigate whether by replicating the database, more efficient solutions to the private retrieval problem can be obtained. We describe schemes that enable a user to access k replicated copies of a database (k>=2) and privately retrieve information stored in the database. This means that each individual server (holding a replicated copy of the database) gets no information on the identity of the item retrieved by the user. Our schemes use the replication to gain substantial saving. In particular, we present a two-server scheme with communication complexity O(n1/3).}},
doi = {http://doi.acm.org/10.1145/293347.293350},
url = {http://portal.acm.org/citation.cfm?id=293347.293350},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/pir.pdf},
@@ -15834,7 +15832,7 @@ in the communication chain. This implies that neither the respondent nor his pro
year = {1995},
publisher = {Springer-Verlag},
organization = {Springer-Verlag},
- abstract = {{We present a receipt-free voting scheme based on a mix- type anonymous channel [Cha81, PIK93]. The receipt-freeness property [BT94] enables voters to hide how they have voted even from a powerful adversary who is trying to coerce him. The work of [BT94] gave the first solution using a voting booth, which is a hardware assumption not unlike that in current physical elections. In our proposed scheme, we reduce the physical assumptions required to obtain receipt-freeness. Our sole physical assumption is the existence of a private channel through which the center can send the voter a message without fear of eavesdropping.},
+ abstract = {{We present a receipt-free voting scheme based on a mix- type anonymous channel [Cha81, PIK93]. The receipt-freeness property [BT94] enables voters to hide how they have voted even from a powerful adversary who is trying to coerce him. The work of [BT94] gave the first solution using a voting booth, which is a hardware assumption not unlike that in current physical elections. In our proposed scheme, we reduce the physical assumptions required to obtain receipt-freeness. Our sole physical assumption is the existence of a private channel through which the center can send the voter a message without fear of eavesdropping.}},
isbn = {978-3-540-59409-3},
doi = {10.1007/3-540-49264-X},
url = {http://www.springerlink.com/content/jhf7ccxn2fj2gfum/},
@@ -15844,7 +15842,7 @@ in the communication chain. This implies that neither the respondent nor his pro
@booklet {Demers94thebayou,
title = {The Bayou Architecture: Support for Data Sharing among Mobile Users},
year = {1994},
- abstract = {{The Bayou System is a platform of replicated, highly-available, variable-consistency, mobile databases on which to build collaborative applications. This paper presents the preliminary system architecture along with the design goals that influenced it. We take a fresh, bottom-up and critical look at the requirements of mobile computing applications and carefully pull together both new and existing techniques into an overall architecture that meets these requirements. Our emphasis is on supporting application-specific conflict detection and resolution and on providing application controlled inconsistency.},
+ abstract = {{The Bayou System is a platform of replicated, highly-available, variable-consistency, mobile databases on which to build collaborative applications. This paper presents the preliminary system architecture along with the design goals that influenced it. We take a fresh, bottom-up and critical look at the requirements of mobile computing applications and carefully pull together both new and existing techniques into an overall architecture that meets these requirements. Our emphasis is on supporting application-specific conflict detection and resolution and on providing application controlled inconsistency.}},
keywords = {reliability, reputation},
doi = {10.1109/WMCSA.1994.37},
url = {http://portal.acm.org/citation.cfm?id=1440028},