gnunetbib

Bibliography (BibTeX, based on AnonBib)

gnunetbib.bib


      1 %
      2 %
      3 % Magic fields:
      4 %
      5 %     www_tags -- used to control which page groups the paper appears in.
      6 %        This is a space-separated list.
      7 %     www_section -- the topic used under 'topics.html'
      8 %     www_{ps|pdf|ps_gz|txt|html|abstract}_url -- link for text/abstract of
      9 %          an entry.
     10 %     www_important -- set for important entries
     11 %     www_remarks -- annotation for an entry
     12 % www_section: A category or keyword (one of the @string section names
     13 %     defined below), NOT the website.
     14 %     example: www_section = misc   (see the commented skeleton entry below the section strings)
     15 
     16 %% List of sections
     17 @string{comm = "Anonymous communication"}
     18 @string{traffic = "Traffic analysis"}
     19 @string{pub = "Anonymous publication"}
     20 @string{proofs = "Provable shuffles"}
     21 @string{methods = "Formal methods"}
     22 @string{nym = "Pseudonymity"}
     23 @string{pir = "Private Information Retrieval"}
     24 @string{economics = "Economics"}
     25 @string{censorship = "Communications Censorship"}
     26 @string{credentials = "E-Cash / Anonymous Credentials"}
     27 @string{misc = "Misc"}
     28 @string{torperf = "Tor Performance"}
     29 @string{conference = "Conference"}
     30 @string{unsorted = "Unsorted"}
     31 
     32 %
     33 % Proposed new sections: application privacy, data anonymization, ...
     34 %
     35 
     36 @string{and = ", "}
     37 @string{lncs = "Lecture Notes in Computer Science"}
     38 
     39 @phdthesis {schanzen-2020,
     40 	author = "Schanzenbach, Martin",
     41 	title = "Towards Self-sovereign, decentralized personal data sharing and identity management",
     42 	type = "Dissertation",
     43 	school = "Technische Universit{\"a}t M{\"u}nchen",
     44 	year = 2020,
     45   address = {Munich},
     46 	keywords = {DNS, GNU Name System, GNUnet, privacy, ReclaimID},
     47   www_section = {Self-sovereign identity, GNUnet, GNU Name System},
     48   www_tags = selected,
     49   www_pdf_url = {http://mediatum.ub.tum.de/?id=1545514},
     50   url = {https://bibliography.gnunet.org},
     51   abstract = {Today, identity management is a key element for commercial and private services on the Internet. Over the past decade, digital identities evolved away from decentralized, pseudonymous, user-controlled personas towards centralized, unambiguous identities managed at and provided through service providers. This development was sparked by the requirement of real identities in the context of electronic commerce. However, it was particularly fuelled later by the emergence of social media and the possibilities it provides to people in order to establish social connections. The following centralization of identities at a handful of service providers significantly improved usability and reliability of identity services. Those benefits come at the expense of other, arguably equally important areas. For users, it is privacy and the permanent threat of being tracked and analyzed. For service providers, it is liability and the risk of facing significant punishment caused by strict privacy regulations which try to counteract the former. In this thesis, we investigate state-of-the-art approaches to modern identity management. We take a look at existing standards and recent research in order to understand the status quo and how it can be improved. As a result from our research, we present the following contributions: In order to allow users to reclaim control over their identities and personal data, we propose a design for a decentralized, self-sovereign directory service. This service allows users to share personal data with services without the need of a trusted third party. Unlike existing research in this area, we propose mechanisms which allow users to efficiently enforce access control on their data. Further, we investigate how trust can be established in user-managed, self-sovereign identities. We propose a trust establishment mechanism through the use of secure name systems. It allows users and organizations to establish trust relationships and identity assertions without the need of centralized public key infrastructures (PKIs). Additionally, we show how recent advancements in the area of non-interactive zero-knowledge (NIZK) protocols can be leveraged in order to create privacy-preserving attribute-based credentials (PP-ABCs) suitable for use in self-sovereign identity systems including our proposed directory service. We provide proof of concept implementations of our designs and evaluate them to show that they are suitable for practical applications.}
     52 }
     53 
     54 @mastersthesis {mteich-2017,
     55         title = {Implementing Privacy Preserving Auction Protocols},
     56         volume = {Master of Science},
     57         year = {2017},
     58         month = feb,
     59         pages = {0--100},
     60         school = {TUM},
     61         address = {Munich},
     62         abstract = {In this thesis we translate Brandt's privacy preserving sealed-bid online auction protocol from RSA to elliptic curve arithmetic and analyze the theoretical and practical benefits. With Brandt's protocol, the auction outcome is completely resolved by the bidders and the seller without the need for a trusted third party. Losing bids are not revealed to anyone. We present libbrandt, our implementation of four algorithms with different outcome and pricing properties, and describe how they can be incorporated in a real-world online auction system. Our performance measurements show a reduction of computation time and prospective bandwidth cost of over 90\% compared to an implementation of the RSA version of the same algorithms. We also evaluate how libbrandt scales in different dimensions and conclude that the system we have presented is promising with respect to an adoption in the real world},
     63         www_section = {auctions, GNUnet, secure multi-party computation},
     64         www_tags = selected,
     65         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thesis_0.pdf},
     66         url = {https://bibliography.gnunet.org},
     67         author = {Markus Teich},
     68         editor = {Totakura, Sree Harsha and Grothoff, Christian and Felix Brandt}
     69 }
     70 @mastersthesis {lurchi-2017,
     71         title = {Improving Voice over GNUnet},
     72         volume = {Bachelor},
     73         year = {2017},
     74         month = {July},
     75         pages = {0--48},
     76         school = {TU Berlin},
     77         type = {B.S},
     78         address = {Berlin},
     79         abstract = {In contrast to ubiquitous cloud-based solutions, the telephony application GNUnet conversation provides fully-decentralized, secure voice communication and thus impedes mass surveillance. The aim of this thesis is to investigate why GNUnet conversation currently provides poor Quality of Experience under typical wide area network conditions and to propose optimization measures. After network shaping and the initialization of two isolated GNUnet peers had been automated, delay measurements were done. With emulated network characteristics, network delay, cryptography delays and audio codec delays were measured and transmitted speech was recorded. An analysis of the measurement results and a subjective assessment of the speech recordings revealed that extreme outliers occur in most scenarios and impair QoE. Moreover it was shown that GNUnet conversation introduces a large delay that confines the environment in which good QoE is possible. In the measurement environment at least 23 ms always occurred, of which large parts were caused by cryptography. It was shown that optimizations in the cryptography part and other components are possible. Finally, the conditions for currently reaching good QoE were determined and ideas for further investigations were presented},
     80         www_section = {CADET, GNUnet, measurement, performance},
     81         www_tags = selected,
     82         url = {https://bibliography.gnunet.org},
     83         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lurchi-bs-thesis.pdf},
     84         author = {Christian Ulrich}
     85 }
     86 @conference {dold2016byzantine,
     87         title = {Byzantine Set-Union Consensus using Efficient Set Reconciliation},
     88         booktitle = {International Conference on Availability, Reliability and Security (ARES)},
     89         year = {2016},
     90         www_section = unsorted,
     91         www_tags = selected,
     92         url = {https://bibliography.gnunet.org},
     93         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dold2016byzantine.pdf},
     94         author = {Dold, Florian and Grothoff, Christian}
     95 }
     96 @conference {consensus2016,
     97         title = {Byzantine Set-Union Consensus using Efficient Set Reconciliation},
     98         booktitle = {International Conference on Availability, Reliability and Security (ARES)},
     99         year = {2016},
    100         month = jun,
    101         abstract = {Applications of secure multiparty computation such as certain electronic voting or auction protocols require Byzantine agreement on large sets of elements. Implementations proposed in the literature so far have relied on state machine replication, and reach agreement on each individual set element in sequence. We introduce set-union consensus, a specialization of Byzantine consensus that reaches agreement over whole sets. This primitive admits an efficient and simple implementation by the composition of Eppstein's set reconciliation protocol with Ben-Or's ByzConsensus protocol. A free software implementation of this construction is available in GNUnet. Experimental results indicate that our approach results in an efficient protocol for very large sets, especially in the absence of Byzantine faults. We show the versatility of set-union consensus by using it to implement distributed key  generation, ballot collection and cooperative decryption for an electronic voting protocol implemented in GNUnet},
    102         www_section = {byzantine fault tolerance, consensus, GNUnet},
    103         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/consensus2016.pdf},
    104         www_tags = selected,
    105         url = {https://bibliography.gnunet.org},
    106         author = {Florian Dold and Christian Grothoff}
    107 }
    108 @conference {taler2016space,
    109         title = {Enabling Secure Web Payments with GNU Taler},
    110         booktitle = {6th International Conference on Security, Privacy and Applied Cryptographic Engineering},
    111         year = {2016},
    112         month = dec,
    113         publisher = {Springer},
    114         organization = {Springer},
    115         address = {Hyderabad},
    116         abstract = {GNU Taler is a new electronic online payment system which provides privacy for customers and accountability for merchants. It uses an exchange service to issue digital coins using blind signatures, and is thus not subject to the performance issues that plague Byzantine fault-tolerant consensus-based solutions. The focus of this paper is addressing the challenges payment systems face in the context of the Web.  We discuss how to address Web-specific challenges, such as handling bookmarks and sharing of links, as well as supporting users that have disabled JavaScript.  Web payment systems must also navigate various constraints imposed by modern Web browser security architecture, such as same-origin policies and the separation between browser extensions and Web pages.  While our analysis focuses on how Taler operates within the security infrastructure provided by the modern Web, the results partially generalize to other payment systems. We also include the perspective of merchants, as existing systems have often struggled with securing payment information at the merchant's side.  Here, challenges include avoiding database transactions for customers that do not actually go through with the purchase, as well as cleanly separating security-critical functions of the payment system from the rest of the Web service},
    117         www_section = {blind signatures, GNUnet, incentives, payments, Taler, web},
    118         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/taler2016space.pdf},
    119         www_tags = selected,
    120         url = {https://taler.net/en/bibliography.html},
    121         author = {Jeffrey Burdges and Florian Dold and Christian Grothoff and Marcello Stanisci}
    122 }
    123 @mastersthesis {xrs2016,
    124         title = {GNUnet und Informationsmacht: Analyse einer P2P-Technologie und ihrer sozialen Wirkung},
    125         volume = {Diplominformatiker},
    126         year = {2016},
    127         month = apr,
    128         pages = {0--103},
    129         school = {Humboldt-Universit{\"a}t zu Berlin},
    130         type = {Diplomarbeit},
    131         address = {Berlin},
    132         abstract = {This thesis studies the GNUnet project comprising its history, ideas and the P2P network technology. It specifically investigates the question of emancipatory potentials with regard to forms of information power due to a widely deployed new Internet technology and tries to identify essential suspensions of power within the scope of an impact assessment. Moreover, we will see by contrasting the GNUnet project with the critical data protection project, founded on social theory, that both are heavily concerned about the problem of illegitimate and unrestrained information power, giving us additional insights for the assessment. Last but not least, I'll try to present a scheme of how both approaches may interact to realize their goals},
    133         www_section = {GNUnet, peer-to-peer},
    134         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/xrs2016.pdf},
    135         www_tags = selected,
    136         url = {https://bibliography.gnunet.org},
    137         author = {Christian Ricardo K{\"u}hne}
    138 }
    139 @conference {2016,
    140         title = {Managing and Presenting User Attributes over a Decentralized Secure Name System},
    141         booktitle = {Data Privacy Management and Security Assurance--11th International Workshop, {DPM} 2016 and 5th International Workshop, {QASA} 2016, Heraklion, Crete, Greece, September 26-27, 2016, Proceedings},
    142         year = {2016},
    143         month = sep,
    144         publisher = {Springer},
    145         organization = {Springer},
    146         address = {Crete, Greece},
    147         abstract = {Today, user attributes are managed at centralized identity providers. However, two centralized identity providers dominate digital identity and access management on the web. This is increasingly becoming a privacy problem in times of mass surveillance and data mining for targeted advertisement. Existing systems for attribute sharing or credential presentation either rely on a trusted third party service or require the presentation to be online and synchronous. In this paper we propose a concept that allows the user to manage and share his attributes asynchronously with a requesting party using a secure, decentralized name system},
    148         www_section = {Decentralisation, GNUnet, Identity and Access Management, User Attributes},
    149         www_tags = selected,
    150         url = {https://bibliography.gnunet.org},
    151         author = {Martin Schanzenbach and Christian Banse}
    152 }
    153 @conference {p4t2016,
    154         title = {Privacy-Preserving Abuse Detection in Future Decentralised Online Social Networks},
    155         booktitle = {Data Privacy Management (DPM)},
    156         year = {2016},
    157         month = sep,
    158         publisher = {Springer},
    159         organization = {Springer},
    160         address = {Heraklion, Greece},
    161         abstract = {Future online social networks need to not only protect sensitive data of their users, but also protect them from abusive behavior coming from malicious participants in the network. We investigate the use of supervised learning techniques to detect abusive behavior and describe privacy-preserving protocols to compute the feature set required by abuse classification algorithms in a secure and privacy-preserving way.  While our method is not yet fully resilient against a strong adaptive adversary, our evaluation suggests that it will be useful to detect abusive behavior with a minimal impact on privacy},
    162         www_section = {abuse, GNUnet, Privacy preserving, reputation, Social networking},
    163         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p4t.pdf},
    164         www_tags = selected,
    165         url = {https://bibliography.gnunet.org},
    166         author = {{\'A}lvaro Garc{\'\i}a-Recuero and Jeffrey Burdges and Christian Grothoff}
    167 }
    168 @article {fk-2016-1-p46,
    169         title = {Zur Idee herrschaftsfreier kooperativer Internetdienste},
    170         journal = {FIfF-Kommunikation},
    171         year = {2016},
    172         chapter = {46},
    173         www_section = {Architecture, GNUnet, Internet},
    174         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fk-2016-1-p46.pdf},
    175         www_tags = selected,
    176         url = {https://bibliography.gnunet.org},
    177         author = {Christian Ricardo K{\"u}hne}
    178 }
    179 @mastersthesis {ma_dold_consensus_21dec2015byzantine,
    180         title = {Byzantine Fault Tolerant Set Consensus with Efficient Set Reconciliation},
    181         volume = {M.S},
    182         year = {2015},
    183         month = dec,
    184         pages = {0--69},
    185         school = {Technische Universit{\"a}t M{\"u}nchen},
    186         type = {Master},
    187         address = {M{\"u}nchen},
    188         abstract = {Byzantine consensus is a fundamental and well-studied problem in the area of distributed systems. It requires a group of peers to reach agreement on some value, even if a fraction of the peers is controlled by an adversary. This thesis proposes set union consensus, an efficient generalization of Byzantine consensus from single elements to sets. This is practically motivated by Secure Multiparty Computation protocols such as electronic voting, where a large set of elements must be collected and agreed upon. Existing practical implementations of Byzantine consensus are typically based on state machine replication and not well-suited for agreement on sets, since they must process individual agreements on all set elements in sequence. We describe and evaluate our implementation of set union consensus in GNUnet, which is based on a composition of Eppstein's set reconciliation protocol with the simple gradecast consensus protocol described by Ben-Or},
    189         www_section = {byzantine consensus, GNUnet, secure multiparty computation, set reconciliation, voting},
    190         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ma_dold_consensus_21dec2015.pdf},
    191         www_tags = selected,
    192         url = {https://bibliography.gnunet.org},
    193         author = {Florian Dold}
    194 }
    195 @article {mcb-es2015,
    196         title = {El programa MORECOWBELL de la NSA: Doblan las campanas para el DNS},
    197         year = {2015},
    198         month = jan,
    199         institution = {GNUnet e.V},
    200         address = {M{\"u}nchen},
    201         www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN},
    202         journal = {unknown},
    203         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-es.pdf},
    204         www_tags = selected,
    205         url = {https://bibliography.gnunet.org},
    206         author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}
    207 }
    208 @article {mcb-it2015,
    209         title = {Il programma MORECOWBELL della NSA: Campane a morto per il DNS},
    210         year = {2015},
    211         month = jan,
    212         institution = {GNUnet e.V},
    213         address = {M{\"u}nchen},
    214         www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN},
    215         journal = {unknown},
    216         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-it.pdf},
    217         www_tags = selected,
    218         url = {https://bibliography.gnunet.org},
    219         author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum and Luca Saiu}
    220 }
    221 @article {mcb-fr2015,
    222         title = {Le programme MORECOWBELL de la NSA: Sonne le glas du DNS},
    223         year = {2015},
    224         month = jan,
    225         institution = {GNUnet e.V},
    226         address = {M{\"u}nchen},
    227         www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN},
    228         journal = {unknown},
    229         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-fr.pdf},
    230         www_tags = selected,
    231         url = {https://bibliography.gnunet.org},
    232         author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum and Ludovic Court{\`e}s}
    233 }
    234 @article {mcb-en2015,
    235         title = {NSA's MORECOWBELL: Knell for DNS},
    236         year = {2015},
    237         month = jan,
    238         institution = {GNUnet e.V},
    239         address = {M{\"u}nchen},
    240         www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN, TLS},
    241         journal = {unknown},
    242         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-en.pdf},
    243         www_tags = selected,
    244         url = {https://bibliography.gnunet.org},
    245         author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}
    246 }
    247 @mastersthesis {mwachs2014,
    248         title = {A Secure and Resilient Communication Infrastructure for Decentralized Networking Applications},
    249         volume = {PhD},
    250         year = {2015},
    251         month = feb,
    252         pages = {0--250},
    253         school = {Technische Universit{\"a}t M{\"u}nchen},
    254         type = {PhD},
    255         address = {M{\"u}nchen},
    256         abstract = {This thesis provides the design and implementation of a secure and resilient communication infrastructure for decentralized peer-to-peer networks. The proposed communication infrastructure tries to overcome limitations to unrestricted communication on today's Internet and has the goal of re-establishing unhindered communication between users. With the GNU name system, we present a fully decentralized, resilient, and privacy-preserving alternative to DNS and existing security infrastructures},
    257         www_section = {Communication, GNU Name System, GNUnet, P2P, resilience},
    258         www_tags = selected,
    259         isbn = {3-937201-45-9},
    260         doi = {10.2313/NET-2015-02-1},
    261         url = {http://nbn-resolving.de/urn/resolver.pl?urn:bvb:91-diss-20150225-1231854-0-7},
    262         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NET-2015-02-1.pdf},
    263         author = {Matthias Wachs}
    264 }
    265 @mastersthesis {panic2014,
    266         title = {An Approach for Home Routers to Securely Erase Sensitive Data},
    267         volume = {Bachelor},
    268         year = {2014},
    269         month = oct,
    270         pages = {0--64},
    271         school = {Technische Universit{\"a}t M{\"u}nchen},
    272         type = {Bachelor Thesis},
    273         address = {Munich},
    274         abstract = {Home routers are always-on low power embedded systems and part of the Internet infrastructure. In addition to the basic router functionality, they can be used to operate sensitive personal services, such as for private web and email servers, secure peer-to-peer networking services like GNUnet and Tor, and encrypted network file system services. These services naturally involve cryptographic operations with the cleartext keys being stored in RAM. This makes router devices possible targets to physical attacks by home intruders. Attacks include interception of unprotected data on bus wires, alteration of firmware through exposed JTAG headers, or recovery of cryptographic keys through the cold boot attack.
    275         This thesis presents Panic!, a combination of open hardware design and free software to detect physical integrity attacks and to react by securely erasing cryptographic keys and other sensitive data from memory. To improve auditability and to allow cheap reproduction, the components of Panic! are kept simple in terms of conceptual design and lines of code.
    276         First, the motivation to use home routers for services besides routing and the need to protect their physical integrity is discussed. Second, the idea and functionality of the Panic! system is introduced and the high-level interactions between its components explained. Third, the software components to be run on the router are described. Fourth, the requirements of the measurement circuit are declared and a prototype is presented. Fifth, some characteristics of pressurized environments are discussed and the difficulties for finding adequate containments are explained. Finally, an outlook to tasks left for the future is given},
    277         www_section = {GNUnet, home router, intrusion detection, memory erasure, Panic, physical access},
    278         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/panic.pdf},
    279         www_tags = selected,
    280         url = {https://bibliography.gnunet.org},
    281         author = {Nicolas Bene{\v s}}
    282 }
    283 @conference {paper_short2014,
    284         title = {Automatic Transport Selection and Resource Allocation for Resilient Communication in Decentralised Networks},
    285         booktitle = {14th IEEE International Conference on Peer-to-Peer Computing},
    286         year = {2014},
    287         month = oct,
    288         address = {London, England},
    289         abstract = {Making communication more resilient is a main focus for modern decentralised networks. A current development to increase connectivity between participants and to be resilient against service degradation attempts is to support different communication protocols, and to switch between these protocols in case degradation or censorship are detected. Supporting multiple protocols with different properties and having to share resources for communication with multiple partners creates new challenges with respect to protocol selection and resource allocation to optimally satisfy the applications' requirements for communication.
    290         This paper presents a novel approach for automatic transport selection and resource allocation with a focus on decentralised networks. Our goal is to evaluate the communication mechanisms available for each communication partner and then allocate resources in line with the requirements of the applications.
    291         We begin by detailing the overall requirements for an algorithm for transport selection and resource allocation, and then compare three different solutions using (1) a heuristic, (2) linear optimisation, and (3) machine learning. To show the suitability and the specific benefits of each approach, we evaluate their performance with respect to usability, scalability and quality of the solution found in relation to application requirements},
    292         www_section = {GNUnet, resource allocation},
    293         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper_short.pdf},
    294         www_tags = selected,
    295         url = {https://bibliography.gnunet.org},
    296         author = {Matthias Wachs and Fabian Oehlmann and Christian Grothoff}
    297 }
    298 @book {pir2014,
    299         title = {The Best of Both Worlds: Combining Information-Theoretic and Computational PIR for Communication Efficiency},
    300         booktitle = {Privacy Enhancing Technologies},
    301         series = {Lecture Notes in Computer Science},
    302         volume = {8555},
    303         year = {2014},
    304         pages = {63--82},
    305         publisher = {Springer International Publishing},
    306         organization = {Springer International Publishing},
    307         abstract = {The goal of Private Information Retrieval (PIR) is the ability to query a database successfully without the operator of the database server discovering which record(s) of the database the querier is interested in. There are two main classes of PIR protocols: those that provide privacy guarantees based on the computational limitations of servers (CPIR) and those that rely on multiple servers not colluding for privacy (IT-PIR). These two classes have different advantages and disadvantages that make them more or less attractive to designers of PIR-enabled privacy enhancing technologies.
    308 We present a hybrid PIR protocol that combines two PIR protocols, one from each of these classes. Our protocol inherits many positive aspects of both classes and mitigates some of the negative aspects. For example, our hybrid protocol maintains partial privacy when the security assumptions of one of the component protocols is broken, mitigating the privacy loss in such an event. We have implemented our protocol as an extension of the Percy++ library so that it combines a PIR protocol by Aguilar Melchor and Gaborit with one by Goldberg. We show that our hybrid protocol uses less communication than either of these component protocols and that our scheme is particularly beneficial when the number of records in a database is large compared to the size of the records. This situation arises in applications such as TLS certificate verification, anonymous communications systems, private LDAP lookups, and others},
    309         isbn = {978-3-319-08505-0},
    310         doi = {10.1007/978-3-319-08506-7_4},
    311         url = {http://dx.doi.org/10.1007/978-3-319-08506-7_4},
    312         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pir_0.pdf},
    313         www_section = unsorted,
    314         author = {Devet, Casey and Goldberg, Ian},
    315         editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}
    316 }
    317 @conference {CADET,
    318         title = {CADET: Confidential Ad-hoc Decentralized End-to-End Transport},
    319         booktitle = {Med-Hoc-Net 2014},
    320         year = {2014},
    321         month = jan,
    322         abstract = {This paper describes CADET, a new transport protocol for confidential and authenticated data transfer in decentralized networks. This transport protocol is designed to operate in restricted-route scenarios such as friend-to-friend or ad-hoc wireless networks. We have implemented CADET and evaluated its performance in various network scenarios, compared it to the well-known TCP/IP stack and tested its response to rapidly changing network topologies. While our current implementation is still significantly slower in high-speed low-latency networks, for typical Internet-usage our system provides much better connectivity and security with comparable performance to TCP/IP},
    323         www_section = {CADET, encryption, GNUnet, routing},
    324         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cadet.pdf},
    325         www_tags = selected,
    326         url = {https://bibliography.gnunet.org},
    327         author = {Polot, Bartlomiej and Christian Grothoff}
    328 }
    329 @conference {DistributedSearch2014Hermann,
    330         title = {Censorship-Resistant and Privacy-Preserving Distributed Web Search},
    331         booktitle = {IEEE International Conference on Peer to Peer computing},
    332         year = {2014},
    333         abstract = {The vast majority of Internet users are relying on centralized search engine providers to conduct their web searches. However, search results can be censored and search queries can be recorded by these providers without the user's knowledge. Distributed web search engines based on peer-to-peer networks have been proposed to mitigate these threats. In this paper we analyze the three most popular real-world distributed web search engines: Faroo, Seeks and Yacy, with respect to their censorship resistance and privacy protection. We show that none of them provides an adequate level of protection against an adversary with modest resources. Recognizing these flaws, we identify security properties a censorship-resistant and privacy-preserving distributed web search engine should provide. We propose two novel defense mechanisms called node density protocol and webpage verification protocol to achieve censorship resistance and show their effectiveness and feasibility with simulations. Finally, we elaborate on how state-of-the-art defense mechanisms achieve privacy protection in distributed web search engines},
    334         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedSearch2014Hermann.pdf},
    335         www_section = unsorted,
    336         url = {https://bibliography.gnunet.org},
    337         author = {Michael Herrmann and Ren Zhang and Kai-Chun Ning and Claudia Diaz}
    338 }
    339 @conference {CANS2014camera-ready,
    340         title = {A Censorship-Resistant, Privacy-Enhancing and Fully Decentralized Name System},
    341         booktitle = {International Conference on Cryptology and Network Security (CANS)},
    342         year = {2014},
    343         publisher = {Springer Verlag},
    344         organization = {Springer Verlag},
    345         abstract = {The Domain Name System (DNS) is vital for access to information on the Internet.  This makes it a target for attackers whose aim is to suppress free access to information. This paper introduces the design and implementation of the GNU Name System (GNS), a fully decentralized and censorship-resistant name system.  GNS provides a privacy-enhancing alternative to DNS which preserves the desirable property of memorable names. Due to its design, it can also double as a partial replacement of public key infrastructures, such as X.509.  The design of GNS incorporates the capability to integrate and coexist with DNS.  GNS is based on the principle of a petname system and builds on ideas from the Simple Distributed Security Infrastructure (SDSI), addressing a central issue with the decentralized mapping of secure identifiers to memorable names: namely the impossibility of providing a global, secure and memorable mapping without a trusted authority. GNS uses the transitivity in the SDSI design to replace the trusted root with secure delegation of authority, thus making petnames useful to other users while operating under a very strong adversary model.  In addition to describing the GNS design, we also discuss some of the mechanisms that are needed to smoothly integrate GNS with existing processes and procedures in Web browsers.  Specifically, we show how GNS is able to transparently support many assumptions that the existing HTTP(S) infrastructure makes about globally unique names},
    346         www_section = {DNS, GNU Name System, GNUnet, PKI},
    347         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper_cans2014_camera_ready.pdf},
    348         www_tags = selected,
    349         url = {https://bibliography.gnunet.org},
    350         author = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff}
    351 }
    352 @mastersthesis {scheibner-thesis2014,
    353         title = {Control Flow Analysis for Event-Driven Programs},
    354         volume = {B.Sc},
    355         year = {2014},
    356         month = jul,
    357         pages = {0--71},
    358         school = {Technical University of Munich},
    359         type = {Bachelors},
    360         address = {Munich},
    361         abstract = {Static analysis is often used to automatically check for common bugs in programs. Compilers already check for some common programming errors and issue warnings; however, they do not do a very deep analysis because this would slow the compilation of the program down. Specialized tools like Coverity or Clang Static Analyzer look at possible runs of a program and track the state of variables with respect to function calls. This information helps to identify possible bugs. In event-driven programs like GNUnet, callbacks are registered for later execution. Normal static analysis cannot track these function calls. This thesis is an attempt to extend different static analysis tools so that they can handle this case as well. Different solutions were thought of and executed with Coverity and Clang.  This thesis describes the theoretical background of model checking and static analysis, the practical usage of widespread static analysis tools, and how these tools can be extended in order to improve their usefulness},
    362         www_section = {event-driven, flow control, GNUnet, static analysis},
    363         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scheibner_thesis.pdf},
    364         www_tags = selected,
    365         url = {https://bibliography.gnunet.org},
    366         author = {Florian Scheibner}
    367 }
    368 @mastersthesis {morales2014cryogenic,
    369         title = {Cryogenic: Enabling Power-Aware Applications on Linux},
    370         volume = {M. Sc},
    371         year = {2014},
    372         month = feb,
    373         pages = {0--106},
    374         school = {Technische Universit{\"a}t M{\"u}nchen},
    375         type = {Masters},
    376         address = {Garching bei M{\"u}nchen},
    377         abstract = {As a means of reducing power consumption, hardware devices are capable to enter into sleep-states that have low power consumption. Waking up from those states in order to return to work is typically a rather energy-intensive activity. Some existing applications have non-urgent tasks that currently force hardware to wake up needlessly or prevent it from going to sleep. It would be better if such non-urgent activities could be scheduled to execute when the respective devices are active to maximize the duration of sleep-states. This requires cooperation between applications and the kernel in order to determine when the execution of a task will not be expensive in terms of power consumption.
    378         This work presents the design and implementation of Cryogenic, a POSIX-compatible API that enables clustering tasks based on the hardware activity state. Specifically, Cryogenic's API allows applications to defer their execution until other tasks use the device they want to use. As a result, two actions that contribute to reduce the device energy consumption are achieved: reduce the number of hardware wake-ups and maximize the idle periods.
    379         The energy measurements enacted at the end of this thesis demonstrate that, for the specific setup and conditions present during our experimentation, Cryogenic is capable to achieve savings between 1\% and 10\% for a USB WiFi device.
    380         Although we ideally target mobile platforms, Cryogenic has been developed by means of a new Linux module that integrates with the existing POSIX event loop system calls. This allows Cryogenic to be used on many different platforms as long as they use a GNU/Linux distribution as the main operating system. Evidence of this can be found in this thesis, where we demonstrate the power savings on a single-board computer},
    381         www_section = {cooperative, cryogenic, GNUnet, Linux, POSIX, power},
    382         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morales2014cryogenic.pdf},
    383         www_tags = selected,
    384         url = {https://bibliography.gnunet.org},
    385         author = {Alejandra Morales}
    386 }
    387 @mastersthesis {dold-thesis2014voting,
    388         title = {Cryptographically Secure, Distributed Electronic Voting},
    389         volume = {B.S},
    390         year = {2014},
    391         month = aug,
    392         pages = {0--49},
    393         school = {Technische Universit{\"a}t M{\"u}nchen},
    394         type = {Bachelor's},
    395         address = {M{\"u}nchen},
    396         abstract = {Elections are a vital tool for decision-making in democratic societies. The past decade has witnessed a handful of attempts to apply modern technology to the election process in order to make it faster and more cost-effective.
    397         Most of the practical efforts in this area have focused on replacing traditional voting booths with electronic terminals, but did not attempt to apply cryptographic techniques able to guarantee critical properties of elections such as secrecy of ballot and verifiability. While such techniques were extensively researched in the past 30 years, practical implementations of cryptographically secure remote electronic voting schemes are not readily available. All existing implementations we are aware of either exhibit critical security flaws, are proprietary black-box systems or require additional physical assumptions such as a preparatory key ceremony executed by the election officials. The latter makes such systems unusable for purely digital communities.
    398         This thesis describes the design and implementation of an electronic voting system in GNUnet, a framework for secure and decentralized networking. We provide a short survey of voting schemes and existing implementations. The voting scheme we implemented makes use of threshold cryptography, a technique which requires agreement among a large subset of the election officials to execute certain cryptographic operations. Since such protocols have applications outside of electronic voting, we describe their design and implementation in GNUnet separately},
    399         www_section = {GNUnet, secure multiparty computation, voting},
    400         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ba_dold_voting_24aug2014.pdf},
    401         www_tags = selected,
    402         url = {https://bibliography.gnunet.org},
    403         author = {Florian Dold}
    404 }
    405 @mastersthesis {decmon2014,
    406         title = {A Decentralized and Autonomous Anomaly Detection Infrastructure for Decentralized Peer-to-Peer Networks},
    407         volume = {Master},
    408         year = {2014},
    409         month = oct,
    410         pages = {0--63},
    411         type = {Master},
    412         abstract = {In decentralized networks, collecting and analysing information from the network is useful for developers and operators to monitor the behaviour and detect anomalies such as attacks or failures in both the overlay and underlay networks. But realizing such an infrastructure is hard to achieve due to the decentralized nature of the network especially if the anomaly occurs on systems not operated by developers or participants get separated from the collection points. In this thesis a decentralized monitoring infrastructure using a decentralized peer-to-peer network is developed to collect information and detect anomalies in a collaborative way without coordination by and in absence of a centralized infrastructure and report detected incidents to a monitoring infrastructure.
    413         We start by introducing background information about peer-to-peer networks, anomalies and anomaly detection techniques in literature. Then we present some of the related work regarding monitoring decentralized networks, anomaly detection and data aggregation in decentralized networks. Then we perform an analysis of the system objectives, target environment and the desired properties of the system. Then we design the system in terms of the overall structure and its individual components. We follow with details about the system implementation. Lastly, we evaluate the final system implementation against our desired objectives},
    414         www_section = {anomaly, censorship, detection, GNUnet},
    415         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/decmon_0.pdf},
    416         www_tags = selected,
    417         url = {https://bibliography.gnunet.org},
    418         author = {Omar Tarabai}
    419 }
    420 @book {anonymity_and_cover_traffic2014,
    421         title = {Do Dummies Pay Off? Limits of Dummy Traffic Protection in Anonymous Communications},
    422         booktitle = {Privacy Enhancing Technologies},
    423         series = {Lecture Notes in Computer Science},
    424         volume = {8555},
    425         year = {2014},
    426         pages = {204--223},
    427         publisher = {Springer International Publishing},
    428         organization = {Springer International Publishing},
    429         abstract = {Anonymous communication systems ensure that correspondence between senders and receivers cannot be inferred with certainty. However, when patterns are persistent, observations from anonymous
    430 communication systems enable the reconstruction of user behavioral profiles. Protection against profiling can be enhanced by adding dummy messages, generated by users or by the anonymity provider, to the communication. In this paper we study the limits of the protection provided by this countermeasure. We propose an analysis methodology based on solving a least squares problem that permits to characterize the adversary's profiling error with respect to the user behavior, the anonymity provider behavior, and the dummy strategy. Focusing on the particular case of a timed pool mix we show how, given a privacy target, the performance analysis can be used to design optimal dummy strategies to protect this objective},
    431         www_section = {anonymous communications, disclosure attacks, dummies},
    432         isbn = {978-3-319-08505-0},
    433         doi = {10.1007/978-3-319-08506-7_11},
    434         url = {http://dx.doi.org/10.1007/978-3-319-08506-7_11},
    435         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/anonymity_and_cover_traffic.pdf},
    436         author = {Oya, Simon and Troncoso, Carmela and P{\'e}rez-Gonz{\'a}lez, Fernando},
    438         editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}
    439 }
    440 @article {private_presence_service2014,
    441         title = {DP5: A Private Presence Service},
    442         journal = { Centre for Applied Cryptographic Research (CACR), University of Waterloo},
    443         year = {2014},
    444         month = may,
    445         type = {Technical Report},
    446         abstract = {The recent NSA revelations have shown that {\textquotedblleft}address book{\textquotedblright} and {\textquotedblleft}buddy list{\textquotedblright} information are routinely targeted for mass interception. As a response to this threat, we present DP5, a cryptographic
    447 service that provides privacy-friendly indication of presence to support real-time communications. DP5 allows clients to register and query the online presence of their list of friends while keeping this
    448 list secret. Besides presence, high-integrity status updates are supported, to facilitate key update and rendezvous protocols. While infrastructure services are required for DP5 to operate, they are
    449 designed to not require any long-term secrets and provide perfect forward secrecy in case of compromise. We provide security arguments for the indistinguishability properties of the protocol, as well
    450 as an evaluation of its performance},
    451         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DP5\%3A\%20A\%20Private\%20Presence\%20Service.pdf},
    452         www_section = unsorted,
    453         url = {https://bibliography.gnunet.org},
    454         author = {Borisov, Nikita and Danezis, George and Goldberg, Ian}
    455 }
    456 @book {obfuscation_osn2014,
    457         title = {On the Effectiveness of Obfuscation Techniques in Online Social Networks},
    458         booktitle = {Privacy Enhancing Technologies},
    459         series = {Lecture Notes in Computer Science},
    460         volume = {8555},
    461         year = {2014},
    462         pages = {42--62},
    463         publisher = {Springer International Publishing},
    464         organization = {Springer International Publishing},
    465         abstract = {Data obfuscation is a well-known technique for protecting user privacy against inference attacks, and it was studied in diverse settings, including search queries, recommender systems, location-based services and Online Social Networks (OSNs). However, these studies typically take the point of view of a single user who applies obfuscation, and focus on protection of a single target attribute. Unfortunately, while narrowing the scope simplifies the problem, it overlooks some significant challenges that effective obfuscation would need to address in a more realistic setting. First, correlations between attributes imply that obfuscation conducted to protect a certain attribute, may influence inference attacks targeted at other attributes. In addition, when multiple users conduct obfuscation simultaneously, the combined effect of their obfuscations may be significant enough to affect the inference mechanism to their detriment. In this work we focus on the OSN setting and use a dataset of 1.9 million Facebook profiles to demonstrate the severity of these problems and explore possible solutions. For example, we show that an obfuscation policy that would limit the accuracy of inference to 45\% when applied by a single user, would result in an inference accuracy of 75\% when applied by 10\% of the users. We show that a dynamic policy, which is continuously adjusted to the most recent data in the OSN, may mitigate this problem. Finally, we report the results of a user study, which indicates that users are more willing to obfuscate their profiles using popular and high quality items. Accordingly, we propose and evaluate an obfuscation strategy that satisfies both user needs and privacy protection},
    466         isbn = {978-3-319-08505-0},
    467         doi = {10.1007/978-3-319-08506-7_3},
    468         url = {http://dx.doi.org/10.1007/978-3-319-08506-7_3},
    469         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/obfuscation_osn.pdf},
    470         www_section = unsorted,
    471         author = {Chen, Terence and Boreli, Roksana and Kaafar, Mohamed-Ali and Friedman, Arik},
    472         editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}
    473 }
    474 @mastersthesis {SupritiSinghMasterThesis2014,
    475         title = {Experimental comparison of Byzantine fault tolerant distributed hash tables},
    476         volume = {M.S},
    477         year = {2014},
    478         month = sep,
    479         pages = {0--42},
    480         school = {Saarland University},
    481         type = {Masters},
    482         address = {Saarbruecken},
    483         abstract = {Distributed Hash Tables (DHTs) are a key data structure for the construction of peer-to-peer systems. They provide an efficient way to distribute the storage and retrieval of key-data pairs among the participating peers. DHTs should be scalable, robust against churn and resilient to attacks. X-Vine is a DHT protocol which offers security against Sybil attacks. All communication among peers is performed over social network links, with the presumption that a friend can be trusted. This trust can be extended to a friend of a friend. It uses the tested Chord Ring topology as an overlay, which has been proven to be scalable and robust. The aim of the thesis is to experimentally compare two DHTs, R5N and X-Vine. GNUnet is a free software secure peer-to-peer framework, which uses R5N. In this thesis, we have presented the implementation of X-Vine on GNUnet, and compared the performance of R5N and X-Vine},
    484         www_section = {DHT, GNUnet, performance analysis, testbed, X-vine},
    485         www_tags = selected,
    486         url = {https://bibliography.gnunet.org},
    487         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SupritiSinghMasterThesis.pdf},
    488         author = {Supriti Singh}
    489 }
    490 @mastersthesis {2017_0,
    491 	title = {The GNUnet System},
    492 	volume = {HDR},
    493 	year = {2017},
    494 	month = dec,
    495 	pages = {0--181},
    496 	school = {Universit{\'e} de Rennes 1},
    497 	type = {Habilitation {\`a} diriger des recherches},
    498 	address = {Rennes},
    499 	abstract = {GNUnet is an alternative network stack for building secure, decentralized and privacy-preserving distributed applications.  Our goal is to replace the old insecure Internet protocol stack.  Starting from an application for secure publication of files, it has grown to include all kinds of basic protocol components and applications towards the creation of a GNU internet. This habilitation provides an overview of the GNUnet architecture, including the development process, the network architecture and the software architecture.  The goal of Part 1 is to provide an overview of how the various parts of the project work together today, and to then give ideas for future directions.  The text is a first attempt to provide this kind of synthesis, and in return does not go into extensive technical depth on any particular topic. Part 2 then gives selected technical details based on eight publications covering many of the core components.  This is a harsh selection; on the GNUnet website there are more than 50 published research papers and theses related to GNUnet, providing extensive and in-depth documentation.  Finally, Part 3 gives an overview of current plans and future work},
    500 	keywords = {decentralization, GNUnet, peer-to-peer, privacy, private information retrieval, routing, secure multiparty computation, self-organization},
    501         www_section = {decentralization, GNUnet, peer-to-peer, privacy, private information retrieval, routing, secure multiparty computation, self-organization},
    502         www_tags = selected,
    503 	doi = {https://hal.inria.fr/tel-01654244},
    504 	url = {https://grothoff.org/christian/habil.pdf},
    505 	author = {Grothoff, Christian}
    506 }
    507 @article {2018_0,
    508 	title = {Toward secure name resolution on the internet},
    509 	journal = {Computers \& Security},
    510 	year = {2018},
    511 	abstract = {The Domain Name System (DNS) provides crucial name resolution functions for most Internet services. As a result, DNS traffic provides an important attack vector for mass surveillance, as demonstrated by the QUANTUMDNS and MORECOWBELL programs of the NSA. This article reviews how DNS works and describes security considerations for next generation name resolution systems. We then describe DNS variations and analyze their impact on security and privacy. We also consider Namecoin, the GNU Name System and RAINS, which are more radical re-designs of name systems in that they both radically change the wire protocol and also eliminate the existing global consensus on TLDs provided by ICANN. Finally, we assess how the different systems stack up with respect to the goal of improving security and privacy of name resolution for the future Internet},
    512 	keywords = {Future Internet, GNUnet, Name resolution, network architecture, privacy, Technology and society},
    513         www_section = {Future Internet, GNUnet, Name resolution, network architecture, privacy, Technology and society},
    514 	issn = {0167-4048},
    515 	doi = {https://doi.org/10.1016/j.cose.2018.01.018},
    516 	url = {http://www.sciencedirect.com/science/article/pii/S0167404818300403},
    517         www_tags = selected,
    518 	author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}
    519 }
    520 @inproceedings {2018_1,
    521   title = {reclaimID: Secure, Self-Sovereign Identities using Name Systems and Attribute-Based Encryption},
    522 	booktitle={Proceedings of 17th IEEE International Conference On Trust, Security And Privacy In Computing And Communications/ 12th IEEE International Conference On Big Data Science And Engineering (TrustCom/BigDataSE)},
    523 	year = {2018},
    524 	abstract = {In this paper we present reclaimID: An architecture that allows users to reclaim their digital identities by securely sharing identity attributes without the need for a centralised service provider. We propose a design where user attributes are stored in and shared over a name system under user-owned namespaces. Attributes are encrypted using attribute-based encryption (ABE), allowing the user to selectively authorize and revoke access of requesting parties to subsets of his attributes. We present an implementation based on the decentralised GNU Name System (GNS) in combination with ciphertext-policy ABE using type-1 pairings. To show the practicality of our implementation, we carried out experimental evaluations of selected implementation aspects including attribute resolution performance. Finally, we show that our design can be used as a standard OpenID Connect Identity Provider allowing our implementation to be integrated into standard-compliant services},
    525 	keywords = {Computer Science - Cryptography and Security},
    526         www_section = {Computer Science - Cryptography and Security},
    527 	url = {https://arxiv.org/abs/1805.06253v1},
    528         www_tags = selected,
    529 	author = {Schanzenbach, M. and Bramm, G. and Sch{\"u}tte, J.}
    530 }
    531 
    532 @techreport {DASEIN,
    533 	title = {Decentralized Authentication for Self-Sovereign Identities using Name Systems},
    534 	number = {847382},
    535 	year = {2018},
    536 	month = oct,
    537 	institution = {Berner Fachhochschule},
    538 	type = {H2020 submission},
    539 	address = {Bern},
    540 	abstract = {The GNU Name System (GNS) is a fully decentralized public key infrastructure and name system with private information retrieval semantics. It offers a holistic approach to interacting seamlessly with IoT ecosystems and enables people and their smart objects to prove their identity, membership and privileges - compatible with existing technologies.
    541 In this report we demonstrate how a wide range of private authentication and identity management scenarios are addressed by GNS in a cost-efficient, usable and secure manner.  This simple, secure and privacy-friendly authentication method is a significant breakthrough when cyber peace, privacy and liability are the priorities for the benefit of a wide range of the population.
    542 After an introduction to GNS itself, we show how GNS can be used to authenticate servers, replacing the Domain Name System (DNS) and X.509 certificate authorities (CAs) with a more privacy-friendly but equally usable protocol which is trustworthy, human-centric and includes group authentication. We also built a demonstrator to highlight how GNS can be used in medical computing to simplify privacy-sensitive data processing in the Swiss health-care system. Combining GNS with attribute-based encryption, we created ReclaimID, a robust and reliable OpenID Connect-compatible authorization system. It includes simple, secure and privacy-friendly single sign-on to seamlessly share selected attributes with Web services and cloud ecosystems. Further, we demonstrate how ReclaimID can be used to solve the problem of addressing, authentication and data sharing for IoT devices.
    543 These applications are just the beginning for GNS; the versatility and extensibility of the protocol will lend itself to an even broader range of use-cases.
    544 GNS is an open standard with a complete free software reference implementation created by the GNU project. It can therefore be easily audited, adapted, enhanced, tailored, developed and/or integrated, as anyone is allowed to use the core protocols and implementations free of charge, and to adapt them to their needs under the terms of the GNU Affero General Public License, a free software license approved by the Free Software Foundation.},
    545 	keywords = {DNS, GNU Name System, GNUnet, privacy, ReclaimID},
    546       	www_section = {DNS, GNU Name System, GNUnet, privacy, ReclaimID},
    547         www_tags = selected,
    548         url = {https://bibliography.gnunet.org},
    549         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dasein10.pdf},
    550 	author = {Christian Grothoff and Martin Schanzenbach and Annett Laube and Emmanuel Benoist}
    551 }
    552 
    553 @book {forward_secure_encryption2014,
    554         title = {Forward-Secure Distributed Encryption},
    555         booktitle = {Privacy Enhancing Technologies},
    556         series = {Lecture Notes in Computer Science},
    557         volume = {8555},
    558         year = {2014},
    559         pages = {123--142},
    560         publisher = {Springer International Publishing},
    561         organization = {Springer International Publishing},
    562         abstract = {Distributed encryption is a cryptographic primitive that implements revocable privacy. The primitive allows a recipient of a message to decrypt it only
    563 if enough senders encrypted that same message. We present a new distributed encryption scheme that is simpler than the previous solution by Hoepman and
    564 Galindo{\textemdash}in particular it does not rely on pairings{\textemdash}and that satisfies stronger security requirements. Moreover, we show how to achieve key evolution, which is necessary to ensure scalability in many practical applications, and prove that the
    565 resulting scheme is forward secure. Finally, we present a provably secure batched
    566 distributed encryption scheme that is much more efficient for small plaintext domains, but that requires more storage},
    567         isbn = {978-3-319-08505-0},
    568         doi = {10.1007/978-3-319-08506-7_7},
    569         url = {http://dx.doi.org/10.1007/978-3-319-08506-7_7},
    570         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/foward_secure_encryption.pdf},
    571         www_section = unsorted,
    572         author = {Lueks, Wouter and Hoepman, Jaap-Henk and Kursawe, Klaus},
    573         editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}
    574 }
    575 @mastersthesis {ma_kirsch_2014_0,
    576         title = {Improved Kernel-Based Port-Knocking in Linux},
    577         volume = {M.S},
    578         year = {2014},
    579         month = aug,
    580         type = {Master's},
    581         abstract = {Port scanning is used to discover vulnerable services and launch attacks against network infrastructure. Port knocking is a well-known technique to hide TCP servers from port scanners. This thesis presents the design of TCP Stealth, a socket option to realize a new port knocking variant with improved security and usability compared to previous designs.
    582 
    583 TCP Stealth replaces the traditional random TCP sequence number (SQN) with a token that authenticates the client and (optionally) the first bytes of the TCP payload.  Clients and servers can enable TCP Stealth by explicitly setting a socket option or linking against a library that wraps existing network system calls.
    584 
    585 This thesis also describes Knock, a free software implementation of TCP Stealth for the Linux kernel and {\tt libknockify}, a shared library that wraps network system calls to activate Knock on GNU/Linux systems, allowing administrators to deploy Knock without recompilation.  Finally, we present experimental results demonstrating that TCP Stealth is compatible with most existing middleboxes on the Internet},
    586         www_section = {GNUnet, Hacienda, Knock, TCP Stealth},
    587         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ma_kirsch_2014_0.pdf},
    589         url = {https://bibliography.gnunet.org},
    590         author = {Julian Kirsch}
    591 }
    592 @conference {strint2014,
    593         title = {The Internet is Broken: Idealistic Ideas for Building a GNU Network},
    594         booktitle = {W3C/IAB Workshop on Strengthening the Internet Against Pervasive Monitoring (STRINT)},
    595         year = {2014},
    596         month = feb,
    597         publisher = {W3C/IAB},
    598         organization = {W3C/IAB},
    599         address = {London, UK},
    600         www_section = {GNU Name System, GNUnet, KBR, PKI},
    601         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/strint2014.pdf},
    603         url = {https://bibliography.gnunet.org},
    604         author = {Christian Grothoff and Polot, Bartlomiej and Carlo von Loesch}
    605 }
    606 @mastersthesis {oehlmann2014machinelearning,
    607         title = {Machine Learning for Bandwidth Management in Decentralized Networks},
    608         volume = {M. Sc},
    609         year = {2014},
    610         month = feb,
    611         pages = {0--91},
    612         school = {Technische Universit{\"a}t M{\"u}nchen},
    613         type = {Masters},
    614         address = {Garching bei M{\"u}nchen},
    615         abstract = {The successful operation of a peer-to-peer network depends on the resilience of its peers'
    616 communications. On the Internet, direct connections between peers are often limited by restrictions like NATs and traffic filtering. Addressing such problems is particularly pressing for peer-to-peer networks that do not wish to rely on any trusted infrastructure, which might otherwise help the participants establish communication channels. Modern peer-to-peer networks employ various techniques to address the problem of restricted connectivity on the Internet. One interesting development is that various overlay networks now support multiple communication protocols to improve resilience and counteract service degradation.
    617 
    618 The support of multiple protocols causes a number of new challenges. A peer should evaluate which protocols fulfill the communication requirements best. Furthermore, limited resources, such as bandwidth, should be distributed among peers and protocols to match application requirements. Existing approaches to this problem of transport selection and resource allocation are rigid: they calculate the solution only from the current state of the
    619 environment, and do not adapt their strategy based on failures or successes of previous
    620 allocations.
    621 
    622 This thesis explores the feasibility of using machine learning to improve the quality of the transport selection and resource allocation over current approaches. The goal is to improve the solution process by learning selection and allocation strategies from the experience gathered in the course of many iterations of the algorithm. We compare the different approaches in the field of machine learning with respect to their properties and suitability for the problem. Based on this evaluation and an in-depth analysis of the requirements of the underlying problem, the thesis presents a design for how reinforcement learning can be used and adapted to the given problem domain.
    623 
    624 The design is evaluated with the help of simulation and a realistic implementation in the GNUnet Peer-to-Peer framework. Our experimental results highlight some of the implications of the multitude of implementation choices, key challenges, and possible directions for the use of reinforcement learning in this domain},
    625         www_section = {bandwidth allocation, GNUnet, machine learning},
    626         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oehlmann2014machinelearning.pdf},
    628         url = {https://bibliography.gnunet.org},
    629         author = {Fabian Oehlmann}
    630 }
    631 @mastersthesis {arias2014bs,
    632         title = {Numerical Stability and Scalability of Secure Private Linear Programming},
    633         volume = {B. Sc},
    634         year = {2014},
    635         month = feb,
    636         pages = {0--65},
    637         school = {Technische Universit{\"a}t M{\"u}nchen},
    638         type = {Bachelor's},
    639         address = {Garching bei M{\"u}nchen},
    640         abstract = {Linear programming (LP) has numerous applications in different fields. In some scenarios, e.g. supply chain master planning (SCMP), the goal is solving linear programs involving multiple parties reluctant to share their private information. In this case, methods from the area of secure multi-party computation (SMC) can be used. Secure multi-party versions of LP solvers have been known to be impractical due to high communication complexity. To overcome this, solutions based on problem transformation have been put forward.
    641 
    642 In this thesis, one such algorithm, proposed by Dreier and Kerschbaum, is discussed, implemented, and evaluated with respect to numerical stability and scalability. Results
    643 obtained with different parameter sets and different test cases are presented and some problems are exposed. It was found that the algorithm has some unforeseen limitations, particularly when implemented within the bounds of normal primitive data types. Random numbers generated during the protocol have to be extremely small so as to not cause problems with overflows after a series of multiplications. The number of peers participating additionally limits the size of numbers. A positive finding was that results produced when none of the aforementioned problems occur are generally quite accurate. We discuss a few possibilities to overcome some of the problems with an implementation using arbitrary precision numbers},
    644         www_section = {GNUnet, linear programming, secure multi-party computation},
    645         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/arias2014bs.pdf},
    647         url = {https://bibliography.gnunet.org},
    648         author = {Raphael Arias}
    649 }
    650 @conference {DBLP:conf/stoc/Ullman13,
    651         title = {Answering $n^{2+o(1)}$ Counting Queries with Differential Privacy is Hard},
    652         booktitle = {Proceedings of the 45th Annual ACM Symposium on Theory of Computing (STOC 2013)},
    653         year = {2013},
    654         pages = {361--370},
    655         www_section = unsorted,
    656         url = {https://bibliography.gnunet.org},
    657         author = {Jonathan Ullman}
    658 }
    659 @book {Broadening2013Chatzikokolakis,
    660         title = {Broadening the Scope of Differential Privacy Using Metrics},
    661         booktitle = {Privacy Enhancing Technologies},
    662         series = {Lecture Notes in Computer Science},
    663         volume = {7981},
    664         year = {2013},
    665         pages = {82--102},
    666         publisher = {Springer Berlin Heidelberg},
    667         organization = {Springer Berlin Heidelberg},
    668         abstract = {Differential Privacy is one of the most prominent frameworks used to deal with disclosure prevention in statistical databases. It provides a formal privacy guarantee, ensuring that sensitive information relative to individuals cannot be easily inferred by disclosing answers to aggregate queries. If two databases are adjacent, i.e. differ only for an individual, then the query should not allow to tell them apart by more than a certain factor. This induces a bound also on the distinguishability of two generic databases, which is determined by their distance on the Hamming graph of the adjacency relation.
    669 In this paper we explore the implications of differential privacy when the indistinguishability requirement depends on an arbitrary notion of distance. We show that we can naturally express, in this way, (protection against) privacy threats that cannot be represented with the standard notion, leading to new applications of the differential privacy framework. We give intuitive characterizations of these threats in terms of Bayesian adversaries, which generalize two interpretations of (standard) differential privacy from the literature. We revisit the well-known results stating that universally optimal mechanisms exist only for counting queries: We show that, in our extended setting, universally optimal mechanisms exist for other queries too, notably sum, average, and percentile queries. We explore various applications of the generalized definition, for statistical databases as well as for other areas, such as geolocation and smart metering},
    670         isbn = {978-3-642-39076-0},
    671         doi = {10.1007/978-3-642-39077-7_5},
    672         url = {http://dx.doi.org/10.1007/978-3-642-39077-7_5},
    673         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brodening2013Chatzikokolakis.pdf},
    674         www_section = unsorted,
    675         author = {Chatzikokolakis, Konstantinos and Andr{\'e}s, Miguel E. and Bordenabe, Nicol{\'a}s Emilio and Palamidessi, Catuscia},
    676         editor = {De Cristofaro, Emiliano and Wright, Matthew}
    677 }
    678 @mastersthesis {gnunset-psyc2013,
    679         title = {Design of a Social Messaging System Using Stateful Multicast},
    680         volume = {M.Sc},
    681         year = {2013},
    682         pages = {0--76},
    683         school = {University of Amsterdam},
    684         type = {Master's},
    685         address = {Amsterdam},
    686         abstract = {This work presents the design of a social messaging service for the GNUnet peer-to-peer framework that offers scalability, extensibility, and end-to-end encrypted communication.  The scalability property is achieved through multicast message delivery, while extensibility is made possible by using PSYC (Protocol for SYnchronous Communication), which provides an extensible RPC (Remote Procedure Call) syntax that can evolve over time without having to upgrade the software on all nodes in the network.  Another key feature provided by the PSYC layer is stateful multicast channels, which are used to store e.g. user profiles.  End-to-end encrypted communication is provided by the mesh service of GNUnet, upon which the multicast channels are built.  Pseudonymous users and social places in the system have cryptographic identities --- identified by their public key --- which are mapped to human-memorable names using GNS (GNU Name System), where each pseudonym has a zone pointing to its places},
    687         www_section = {GNS, GNUnet, PSYC, social networks},
    688         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gnunet-psyc.pdf},
    690         url = {https://bibliography.gnunet.org},
    691         author = {Gabor X Toth}
    692 }
    693 @conference {fps2013wachs,
    694         title = {On the Feasibility of a Censorship Resistant Decentralized Name System},
    695         booktitle = {6th International Symposium on Foundations \& Practice of Security (FPS 2013)},
    696         year = {2013},
    697         month = oct,
    698         publisher = {Springer Verlag},
    699         organization = {Springer Verlag},
    700         address = {La Rochelle, France},
    701         abstract = {A central problem on the Internet today is that key infrastructure for security is concentrated in a few places.  This is particularly true in the areas of naming and public key infrastructure. Secret services and other government organizations can use this fact to block access to information or monitor communications.  One of the most popular and easy-to-perform techniques is to make information on the Web inaccessible by censoring or manipulating the Domain Name System (DNS).  With the introduction of DNSSEC, the DNS is furthermore poised to become an alternative PKI to the failing X.509 CA system, further cementing the power of those in charge of operating DNS.
    702 
    703 This paper maps the design space and gives design requirements for censorship resistant name systems.  We survey the existing range of ideas for the realization of such a system and discuss the challenges these systems have to overcome in practice.  Finally, we present the results from a survey on browser usage, which supports the idea that delegation should be a key ingredient in any censorship resistant name system},
    704         www_section = {DNS, GNS, GNU Name System, GNUnet, PKI, SDSI, Zooko's Triangle},
    705         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fps2013wachs.pdf},
    707         url = {https://bibliography.gnunet.org},
    708         author = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff}
    709 }
    710 @conference {DBLP:conf/netys/BoutetFJKR13,
    711         title = {FreeRec: An Anonymous and Distributed Personalization Architecture},
    712         booktitle = {Proceedings of the International Conference on Networked Systems (NETYS 2013)},
    713         year = {2013},
    714         pages = {58--73},
    715         www_section = unsorted,
    716         url = {https://bibliography.gnunet.org},
    717         author = {Antoine Boutet and Davide Frey and Arnaud Jegou and Anne-Marie Kermarrec and Heverson B. Ribeiro}
    718 }
    719 @mastersthesis {2013_1,
    720         title = {Large Scale Distributed Evaluation of Peer-to-Peer Protocols},
    721         volume = {Master of Science},
    722         year = {2013},
    723         month = jun,
    724         pages = {0--76},
    725         school = {Technische Universit{\"a}t M{\"u}nchen},
    726         type = {Masters },
    727         address = {Garching bei M{\"u}nchen},
    728         abstract = {Evaluations of P2P protocols during the system's design and implementation phases are commonly done through simulation and emulation, respectively.  While the current state-of-the-art simulation allows evaluations with many millions of peers through the use of abstractions, emulation still lags behind as it involves executing the real implementation for some parts of the system.  This difference in scales can make it hard to relate the evaluations created with simulation and emulation during the design and implementation phases and can result in a limited evaluation of the implementation, which may cause severe problems after deployment.
    729 
    730 In this thesis, we build upon an existing emulator for P2P applications to push the scales offered by emulation towards the limits set by simulation.  Our approach distributes and co-ordinates the emulation across many hosts.  Large deployments are possible by deploying hundreds or thousands of peers on each host.
    731 
    732 To address the varying needs of an experimenter and the range of available hardware, we make our approach scalable such that it can easily be adapted to run evaluations on a single machine or a large group of hosts.  Specifically, the system automatically adjusts the number of overlapping operations to the available resources efficiently using a feedback mechanism, thus relieving the experimenter from the hassles of manual tuning.
    733 
    734 We specifically target HPC systems like compute clusters and supercomputers and demonstrate how such systems can be used for large scale emulations by evaluating two P2P applications with deployment sizes up to 90k peers on a supercomputer},
    735         www_section = {emulation, GNUnet, large scale testing, protocol evaluation, testbed},
    736         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thesis_lowres.pdf , https://git.gnunet.org/bibliography.git/plain/docs/thesis.pdf},
    738         url = {https://bibliography.gnunet.org},
    739         author = {Totakura, Sree Harsha}
    740 }
    741 @mastersthesis {2013_2,
    742         title = {Monkey--Generating Useful Bug Reports Automatically},
    743         volume = {Bachelor},
    744         year = {2013},
    745         month = jul,
    746         pages = {0--50},
    747         school = {Technische Universit{\"a}t M{\"u}nchen},
    748         type = {Bachelor Thesis},
    749         address = {Munich},
    750         abstract = {Automatic crash handlers support software developers in finding bugs and fixing the problems in their code. Most of them behave similarly in providing the developer with a (symbolic) stack trace and a memory dump of the crashed application. This introduces some problems that we try to fix with our proposed automatic bug reporting system called {\textquotedblleft}Monkey{\textquotedblright}.
    751 
    752 In this paper we describe the problems that occur when debugging widely distributed systems and how Monkey handles them. First, we describe our motivation for developing the Monkey system. Afterwards, we present the most common existing automatic crash handlers and how they work. Thirdly, we give an overview of the Monkey system and its components. In the fourth chapter we analyze one report generated by Monkey, evaluate an online experiment we conducted and present some of our findings during the development of the clustering algorithm used to categorize crash reports. Last, we discuss some of Monkey's features and compare them to the existing approaches. Also, some ideas for the future development of the Monkey system are presented before we conclude that Monkey's approach is promising, but some work is still left to establish Monkey in the open source community},
    753         www_section = {automatic, clustering, debugging, GDB, GNUnet, report, Tor},
    754         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main_0.pdf},
    756         url = {https://bibliography.gnunet.org},
    757         author = {Markus Teich}
    758 }
    759 @conference {2013_3,
    760         title = {Persea: A Sybil-resistant Social DHT},
    761         booktitle = {Proceedings of the Third ACM Conference on Data and Application Security and Privacy},
    762         year = {2013},
    763         publisher = {ACM},
    764         organization = {ACM},
    765         address = {New York, NY, USA},
    766         abstract = {P2P systems are inherently vulnerable to Sybil attacks, in which an attacker can have a large number of identities and use them to control a substantial fraction of the system. We propose Persea, a novel P2P system that is more robust against Sybil attacks than prior approaches. Persea derives its Sybil resistance by assigning IDs through a bootstrap tree, the graph of how nodes have joined the system through invitations. More specifically, a node joins Persea when it gets an invitation from an existing node in the system. The inviting node assigns a node ID to the joining node and gives it a chunk of node IDs for further distribution. For each chunk of ID space, the attacker needs to socially engineer a connection to another node already in the system. This hierarchical distribution of node IDs confines a large attacker botnet to a considerably smaller region of the ID space than in a normal P2P system. Persea uses a replication mechanism in which each (key,value) pair is stored in nodes that are evenly spaced over the network. Thus, even if a given region is occupied by attackers, the desired (key,value) pair can be retrieved from other regions. We compare our results with Kad, Whanau, and X-Vine and show that Persea is a better solution against Sybil attacks.},
    767         www_section = {security, social dht, Sybil attack},
    768         isbn = {978-1-4503-1890-7},
    769         doi = {10.1145/2435349.2435372},
    770         url = {http://doi.acm.org/10.1145/2435349.2435372},
    771         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p169-al-ameen.pdf},
    772         author = {Al-Ameen, Mahdi N. and Matthew Wright}
    773 }
    774 @book {sep-privacy,
    775         title = {Privacy},
    776         booktitle = {The Stanford Encyclopedia of Philosophy},
    777         year = {2013},
    778         edition = {Fall 2013},
    779         author = {DeCew, Judith},
    780         publisher = {unknown},
    781         www_section = unsorted,
    782         url = {https://bibliography.gnunet.org},
    783         editor = {Edward N. Zalta}
    784 }
    785 @article {2013_4,
    786         title = {Public Key Pinning for TLS Using a Trust on First Use Model},
    787         year = {2013},
    788 	abstract = {Although the Public Key Infrastructure (PKI) using X.509 is meant to prevent the occurrence of man-in-the-middle attacks on TLS, there are still situations in which such attacks are possible due to the large number of Certification Authorities (CAs) that have to be trusted. Recent incidents involving CA compromises, which led to the issuance of rogue certificates, indicate the weakness of the PKI model. Recently, various public key pinning protocols -- such as DANE or TACK -- have been proposed to thwart man-in-the-middle attacks on TLS connections. It will take a longer time, however, until any of these protocols reaches wide deployment. We present an approach intended as an interim solution to bridge this gap and provide protection for connections to servers not yet using a pinning protocol. The presented method is based on public key pinning with a trust on first use model, and can be combined with existing notary approaches as well},
    789         www_section = {certificate, pinning, PKI, public key pinning, TLS, TOFU, trust on first use, X.509},
    790         journal = {unknown},
    791         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tofu-pinning.pdf},
    793         url = {https://bibliography.gnunet.org},
    794         author = {Gabor X Toth},
    795         editor = {Tjebbe Vlieg}
    796 }
    797 @mastersthesis {2013_5,
    798         title = {Speeding Up Tor with SPDY},
    799         volume = {Master's in Computer Science},
    800         year = {2013},
    801         month = nov,
    802         pages = {0--124},
    803         school = {Technische Universit{\"a}t M{\"u}nchen},
    804         type = {Master's},
    805         address = {Garching bei M{\"u}nchen},
    806         abstract = {SPDY is a rather new protocol which is an alternative to HTTP. It was designed to address inefficiencies in the latter and thereby improve latency and reduce bandwidth consumption.
    807 
    808 This thesis presents the design and implementation of a setup for utilizing SPDY within the anonymizing Tor network for reducing latency and traffic in the latter. A C library implementing the SPDY server protocol is introduced together with an HTTP-to-SPDY and a SPDY-to-HTTP proxy, which are the basis for the presented design.
    809 
    810 Furthermore, we focus on the SPDY server push feature which allows servers to send multiple responses to a single request for reducing latency and traffic on loading web pages. We propose a prediction algorithm for employing push at SPDY servers and proxies. The algorithm makes predictions based on previous requests and responses and initially does not know anything about the data which it will push.
    811 
    812 This thesis includes extensive measurement data highlighting the possible benefits of using SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks experiencing latency or loss. Moreover, the real profit from using SPDY within the Tor network on loading some of the most popular web sites is presented. Finally, evaluations of the proposed push prediction algorithm are given for emphasizing the possible gain of employing it at SPDY reverse and forward proxies},
    813         www_section = {anonymity, HTTP, privacy, spdy, Tor},
    814         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/uzunov2013torspdy.pdf},
    816         url = {https://bibliography.gnunet.org},
    817         author = {Andrey Uzunov}
    818 }
    819 @article {Kermarrec2013,
    820         title = {Towards a Personalized Internet: a Case for a Full Decentralization},
    821         journal = {Philosophical Transactions. Series A, Mathematical, Physical, and Engineering Sciences},
    822         volume = {371},
    823         number = {1987},
    824         year = {2013},
    825         month = mar,
    826         abstract = {The Web has become a user-centric platform where users post, share, annotate, comment and forward content be it text, videos, pictures, URLs, etc. This social dimension creates tremendous new opportunities for information exchange over the Internet, as exemplified by the surprising and exponential growth of social networks and collaborative platforms. Yet, niche content is sometimes difficult to retrieve using traditional search engines because they target the mass rather than the individual. Likewise, relieving users from useless notification is tricky in a world where there is so much information and so little of interest for each and every one of us. We argue that ultra-specific content could be retrieved and disseminated should search and notification be personalized to fit this new setting. We also argue that users' interests should be implicitly captured by the system rather than relying on explicit classifications simply because the world is by nature unstructured, dynamic and users do not want to be hampered in their actions by a tight and static framework. In this paper, we review some existing personalization approaches, most of which are centralized. We then advocate the need for fully decentralized systems because personalization raises two main issues. Firstly, personalization requires information to be stored and maintained at a user granularity which can significantly hurt the scalability of a centralized solution. Secondly, at a time when the {\textquoteleft}big brother is watching you' attitude is prominent, users may be more and more reluctant to give away their personal data to the few large companies that can afford such personalization. We start by showing how to achieve personalization in decentralized systems and conclude with the research agenda ahead},
    827         issn = {1364-503X},
    828         doi = {10.1098/rsta.2012.0380},
    829         www_section = unsorted,
    830         url = {https://bibliography.gnunet.org},
    831         author = {Kermarrec, Anne-Marie}
    832 }
    833 @conference {2013_6,
    834         title = {Trawling for Tor Hidden Services: Detection, Measurement, Deanonymization},
    835         booktitle = {Security and Privacy (SP), 2013 IEEE Symposium on},
    836         year = {2013},
    837         www_section = {anonymity network, arbitrary hidden services, command and control channels, data privacy, deanonymize hidden services, DuckDuckGo search engine, hidden services, Internet, Internet service privacy, privacy, search engines, Silk Road, Tor, Tor hidden services, volunteer based anonymity network, volunteer operated relays},
    838         doi = {10.1109/SP.2013.15},
    839         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trawling_for_tor_HS.pdf},
    841         url = {https://bibliography.gnunet.org},
    842         author = {Biryukov, A. and Pustogarov, I. and Weinmann, R.}
    843 }
    844 @conference {2013_7,
    845         title = {WhatsUp: A Decentralized Instant News Recommender},
    846         booktitle = {IEEE 27th International Symposium on Parallel \& Distributed Processing},
    847         year = {2013},
    848         publisher = {IEEE},
    849         organization = {IEEE},
    850         abstract = {We present WHATSUP, a collaborative filtering system for disseminating news items in a large-scale dynamic setting with no central authority. WHATSUP constructs an implicit social network based on user profiles that express the opinions of users about the news items they receive (like-dislike). Users with similar tastes are clustered using a similarity metric reflecting long-standing and emerging (dis)interests. News items are disseminated through a novel heterogeneous gossip protocol that (1) biases the orientation of its targets towards those with similar interests, and (2) amplifies dissemination based on the level of interest in every news item. We report on an extensive evaluation of WHATSUP through (a) simulations, (b) a ModelNet emulation on a cluster, and (c) a PlanetLab deployment based on real datasets. We show that WHATSUP outperforms various alternatives in terms of accurate and complete delivery of relevant news items while preserving the fundamental advantages of standard gossip: namely, simplicity of deployment and robustness},
    851         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/whatsup.pdf},
    852         www_section = unsorted,
    853         url = {https://bibliography.gnunet.org},
    854         author = {Antoine Boutet and Davide Frey and Rachid Guerraoui and Arnaud Jegou and Anne-Marie Kermarrec}
    855 }
    856 @article {knight2012autonetkit,
    857         title = {AutoNetkit: simplifying large scale, open-source network experimentation},
    858         journal = {SIGCOMM Comput. Commun. Rev},
    859         volume = {42},
    860         number = {4},
    861         year = {2012},
    862         pages = {97--98},
    863         publisher = {ACM},
    864         address = {New York, NY, USA},
    865         www_section = {automated configuration, emulation, Network management},
    866         issn = {0146-4833},
    867         doi = {10.1145/2377677.2377699},
    868         url = {http://doi.acm.org/10.1145/2377677.2377699},
    869         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/autonetkit-small.pdf},
    870         author = {Knight, Simon and Jaboldinov, Askar and Maennel, Olaf and Phillips, Iain and Roughan, Matthew}
    871 }
    872 @book {2012_0,
    873         title = {BLIP: Non-interactive Differentially-Private Similarity Computation on Bloom filters},
    874         booktitle = {Stabilization, Safety, and Security of Distributed Systems},
    875         series = {Lecture Notes in Computer Science},
    876         volume = {7596},
    877         year = {2012},
    878         pages = {202--216},
    879         publisher = {Springer Berlin Heidelberg},
    880         organization = {Springer Berlin Heidelberg},
    881         abstract = {In this paper, we consider the scenario in which the profile of a user is represented in a compact way, as a Bloom filter, and the main objective is to privately compute in a distributed manner the similarity between users by relying only on the Bloom filter representation. In particular, we aim at providing a high level of privacy with respect to the profile even if a potentially unbounded number of similarity computations take place, thus calling for a non-interactive mechanism. To achieve this, we propose a novel non-interactive differentially private mechanism called BLIP (for BLoom-and-flIP) for randomizing Bloom filters. This approach relies on a bit flipping mechanism and offers high privacy guarantees while maintaining a small communication cost. Another advantage of this non-interactive mechanism is that similarity computation can take place even when the user is offline, which is impossible to achieve with interactive mechanisms. Another of our contributions is the definition of a probabilistic inference attack, called the {\textquotedblleft}Profile Reconstruction attack{\textquotedblright}, that can be used to reconstruct the profile of an individual from his Bloom filter representation. More specifically, we provide an analysis of the protection offered by BLIP against this profile reconstruction attack by deriving an upper and lower bound for the required value of the differential privacy parameter $\epsilon$},
    882         isbn = {978-3-642-33535-8},
    883         doi = {10.1007/978-3-642-33536-5_20},
    884         url = {http://dx.doi.org/10.1007/978-3-642-33536-5_20},
    885         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BLIP2012Alaggan.pdf},
    886         www_section = unsorted,
    887         author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie},
    888         editor = {Richa, Andr{\'e}a W. and Scheideler, Christian}
    889 }
    890 @conference {congestion-tor12,
    891         title = {Congestion-aware Path Selection for Tor},
    892         booktitle = {FC'12--Proceedings of the 16th International Conference on Financial Cryptography and Data Security},
    893         year = {2012},
    894         month = feb,
    895         address = {Bonaire},
    896         abstract = {Tor, an anonymity network formed by volunteer nodes, uses the estimated bandwidth of the nodes as a central feature of its path selection algorithm. The current load on nodes is not considered in this algorithm, however, and we observe that some nodes persist in being under-utilized or congested. This can degrade the network's performance, discourage Tor adoption, and consequently reduce the size of Tor's anonymity set. In an effort to reduce congestion and improve load balancing, we propose a congestion-aware path selection algorithm. Using latency as an indicator of congestion, clients use opportunistic and lightweight active measurements to evaluate the congestion state of nodes, and reject nodes that appear congested. Through experiments conducted on the live Tor network, we verify our hypothesis that clients can infer congestion using latency and show that congestion-aware path selection can improve performance},
    897         www_section = {algorithms, Tor, volunteer nodes},
    898         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2712\%20-\%20Congestion-aware\%20Path\%20Selection\%20for\%20Tor.pdf},
    899         url = {https://bibliography.gnunet.org},
    900         author = {Tao Wang and Kevin Bauer and Clara Forero and Ian Goldberg}
    901 }
    902 @conference {2012_1,
    903         title = {CRISP: Collusion-resistant Incentive-compatible Routing and Forwarding in Opportunistic Networks},
    904         booktitle = {Proceedings of the 15th ACM International Conference on Modeling, Analysis and Simulation of Wireless and Mobile Systems},
    905         year = {2012},
    906         publisher = {ACM},
    907         organization = {ACM},
    908         address = {New York, NY, USA},
    909         www_section = {black-hole attack, collusion, credit schemes, delay tolerant networks, flooding, incentive schemes, mobile peer-to-peer networks, opportunistic networks},
    910         isbn = {978-1-4503-1628-6},
    911         doi = {10.1145/2387238.2387253},
    912         url = {http://doi.acm.org/10.1145/2387238.2387253},
    913         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crisp-mswim.pdf},
    914         author = {Sadiq, Umair and Kumar, Mohan and Wright, Matthew}
    915 }
    916 @article {DBLP:journals/corr/abs-1202-4503,
    917         title = {A Critical Look at Decentralized Personal Data Architectures},
    918         journal = {CoRR},
    919         volume = {abs/1202.4503},
    920         year = {2012},
    921         month = feb,
    922         abstract = {While the Internet was conceived as a decentralized network, the most widely used web applications today tend toward centralization. Control increasingly rests with centralized service providers who, as a consequence, have also amassed unprecedented amounts of data about the behaviors and personalities of individuals. Developers, regulators, and consumer advocates have looked to alternative decentralized architectures as the natural response to threats posed by these centralized services. The result has been a great variety of solutions that include personal data stores (PDS), infomediaries, Vendor Relationship Management (VRM) systems, and federated and distributed social networks. And yet, for all these efforts, decentralized personal data architectures have seen little adoption. This position paper attempts to account for these failures, challenging the accepted wisdom in the web community on the feasibility and desirability of these approaches. We start with a historical discussion of the development of various categories of decentralized personal data architectures. Then we survey the main ideas to illustrate the common themes among these efforts. We tease apart the design characteristics of these systems from the social values that they (are intended to) promote. We use this understanding to point out numerous drawbacks of the decentralization paradigm, some inherent and others incidental. We end with recommendations for designers of these systems for working towards goals that are achievable, but perhaps more limited in scope and ambition},
    923         www_section = {distributed social networks, economics, personal data stores, policy, privacy, web},
    924         url = {https://bibliography.gnunet.org},
    925         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoRR\%20-\%20Critical\%20look\%20at\%20decentralization.pdf},
    926         author = {Arvind Narayanan and Vincent Toubiana and Solon Barocas and Helen Nissenbaum and Dan Boneh}
    927 }
    928 @mastersthesis {2012_2,
    929         title = {Decentralized Evaluation of Regular Expressions for Capability Discovery in Peer-to-Peer Networks},
    930         volume = {M.S},
    931         year = {2012},
    932         month = nov,
    933         pages = {0--100},
    934         school = {Technische Universit{\"a}t M{\"u}nchen},
    935         type = {Masters},
    936         address = {Garching bei M{\"u}nchen},
    937         abstract = {This thesis presents a novel approach for decentralized evaluation of regular expressions for capability discovery in DHT-based overlays. The system provides support for announcing capabilities expressed as regular expressions and discovering participants offering adequate capabilities. The idea behind our approach is to convert regular expressions into finite automatons and store the corresponding states and transitions in a DHT. We show how locally constructed DFA are merged in the DHT into an NFA without the knowledge of any NFA already present in the DHT and without the need for any central authority. Furthermore we present options of optimizing the DFA. There exist several possible applications for this general approach of decentralized regular expression evaluation. However, in this thesis we focus on the application of discovering users that are willing to provide network access using a specified protocol to a particular destination. We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster},
    938         www_section = {DFA, distributed hash table, GNUnet, NFA, regular expressions, search},
    939         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/szengel2012ms.pdf},
    940         url = {https://bibliography.gnunet.org},
    941         author = {Maximilian Szengel}
    942 }
    943 @mastersthesis {2012_3,
    944         title = {Design and Implementation of a Censorship Resistant and Fully Decentralized Name System},
    945         volume = {M.Sc},
    946         year = {2012},
    947         month = sep,
    948         pages = {0--116},
    949         school = {TU Munich},
    950         type = {Master's},
    951         address = {Garching bei M{\"u}nchen},
    952         abstract = {This thesis presents the design and implementation of the GNU Alternative Domain System (GADS), a decentralized, secure name system providing memorable names for the Internet as an alternative to the Domain Name System (DNS).  The system builds on ideas from Rivest's Simple Distributed Security Infrastructure (SDSI) to address a central issue with providing a decentralized mapping of secure identifiers to memorable names: providing a global, secure and memorable mapping is impossible without a trusted authority.  SDSI offers an alternative by linking local name spaces; GADS uses the transitivity provided by the SDSI design to build a decentralized and censorship resistant name system without a trusted root based on secure  delegation of authority. Additional details need to be considered in order to enable GADS to integrate smoothly with the World Wide Web.  While following links on the Web matches following delegations in GADS, the existing HTTP-based infrastructure makes many assumptions about globally unique names; however, proxies can be used to enable legacy applications to function with GADS. This work presents the fundamental goals and ideas behind GADS, provides technical details on how GADS has been implemented and discusses deployment issues for using GADS with existing systems.  We discuss how GADS and legacy DNS can interoperate during a transition period and what additional security advantages GADS offers over DNS with Security Extensions (DNSSEC).  Finally, we present the results of a survey into surfing behavior, which suggests that the manual introduction of new direct links in GADS will be infrequent},
    953         www_section = {censorship resistance, decentralized, DNS, GNU Name System, GNUnet},
    954         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/schanzen2012msc.pdf},
    955         url = {https://bibliography.gnunet.org},
    956         author = {Martin Schanzenbach}
    957 }
    958 @book {2012_4,
    959         title = {Differential Privacy with Imperfect Randomness},
    960         booktitle = {Advances in Cryptology -- CRYPTO 2012},
    961         series = {Lecture Notes in Computer Science},
    962         volume = {7417},
    963         year = {2012},
    964         pages = {497--516},
    965         publisher = {Springer Berlin Heidelberg},
    966         organization = {Springer Berlin Heidelberg},
    967         abstract = {In this work we revisit the question of basing cryptography on imperfect randomness. Bosley and Dodis (TCC'07) showed that if a source of randomness R is {\textquotedblleft}good enough{\textquotedblright} to generate a secret key capable of encrypting k bits, then one can deterministically extract nearly k almost uniform bits from R, suggesting that traditional privacy notions (namely, indistinguishability of encryption) require an {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other, even stronger impossibility results are known for achieving privacy under specific {\textquotedblleft}non-extractable{\textquotedblright} sources of randomness, such as the $\gamma$-Santha-Vazirani (SV) source, where each next bit has fresh entropy, but is allowed to have a small bias $\gamma < 1$ (possibly depending on prior bits).
    968 We ask whether similar negative results also hold for a more recent notion of privacy called differential privacy (Dwork et al., TCC'06), concentrating, in particular, on achieving differential privacy with the Santha-Vazirani source. We show that the answer is no. Specifically, we give a differentially private mechanism for approximating arbitrary {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even with randomness coming from a $\gamma$-Santha-Vazirani source, for any $\gamma < 1$. This provides a somewhat surprising {\textquotedblleft}separation{\textquotedblright} between traditional privacy and differential privacy with respect to imperfect randomness.
    969 Interestingly, the design of our mechanism is quite different from the traditional {\textquotedblleft}additive-noise{\textquotedblright} mechanisms (e.g., Laplace mechanism) successfully utilized to achieve differential privacy with perfect randomness. Indeed, we show that any (non-trivial) {\textquotedblleft}SV-robust{\textquotedblright} mechanism for our problem requires a demanding property called consistent sampling, which is strictly stronger than differential privacy, and cannot be satisfied by any additive-noise mechanism},
    970         isbn = {978-3-642-32008-8},
    971         doi = {10.1007/978-3-642-32009-5_29},
    972         url = {http://dx.doi.org/10.1007/978-3-642-32009-5_29},
    973         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DPwithImperfectRandomness2012Dodis.pdf},
    974         www_section = unsorted,
    975         author = {Dodis, Yevgeniy and L{\'o}pez-Alt, Adriana and Mironov, Ilya and Vadhan, Salil},
    976         editor = {Safavi-Naini, Reihaneh and Canetti, Ran}
    977 }
    978 @article {2012_5,
    979         title = {Efficient and Secure Decentralized Network Size Estimation},
    980         year = {2012},
    981         month = may,
    982         institution = {Technische Universit{\"a}t M{\"u}nchen},
    983         address = {Garching bei M{\"u}nchen},
    984         abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for
    985 performance tuning of P2P routing algorithms.  This paper introduces
    986 and evaluates a new efficient method for participants in an
    987 unstructured P2P network to establish the size of the overall network.
    988 The presented method is highly efficient, propagating information
    989 about the current size of the network to all participants using
    990 O(|E|) operations where |E| is the number of edges in the network.
    991 Afterwards, all nodes have the same network size estimate, which can
    992 be made arbitrarily accurate by averaging results from multiple rounds
    993 of the protocol.  Security measures are included which make it
    994 prohibitively expensive for a typical active participating adversary
    995 to significantly manipulate the estimates.  This paper includes
    996 experimental results that demonstrate the viability, efficiency and
    997 accuracy of the protocol},
    998         www_section = {GNUnet, network security, network size estimation, peer-to-peer networking},
    999         journal = {unknown},
   1000         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nse-techreport.pdf},
   1001         url = {https://bibliography.gnunet.org},
   1002         author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}
   1003 }
   1004 @conference {2012_6,
   1005         title = {Efficient and Secure Decentralized Network Size Estimation},
   1006         booktitle = {IFIP International Conferences on Networking (Networking 2012)},
   1007         year = {2012},
   1008         month = may,
   1009         pages = {304--317},
   1010         publisher = {Springer Verlag},
   1011         organization = {Springer Verlag},
   1012         address = {Prague, CZ},
   1013         abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for performance tuning of P2P routing algorithms.  This paper introduces and evaluates a new efficient method for participants in an unstructured P2P network to establish the size of the overall network. The presented method is highly efficient, propagating information about the current size of the network to all participants using O(|E|) operations where |E| is the number of edges in the network. Afterwards, all nodes have the same network size estimate, which can be made arbitrarily accurate by averaging results from multiple rounds of the protocol.  Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates.  This paper includes experimental results that demonstrate the viability, efficiency and accuracy of the protocol},
   1014         www_section = {byzantine fault tolerance, GNUnet, network size estimation, proof of work},
   1015         url = {http://grothoff.org/christian/rrsize2012.pdf},
   1016         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-ifip.pdf},
   1017         author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}
   1018 }
   1019 @conference {gossipico2012,
   1020         title = {Gossip-based counting in dynamic networks},
   1021         booktitle = {IFIP International Conferences on Networking (Networking 2012)},
   1022         year = {2012},
   1023         month = may,
   1024         pages = {404--419},
   1025         publisher = {Springer Verlag},
   1026         organization = {Springer Verlag},
   1027         address = {Prague, CZ},
   1028         www_section = {network size estimation},
   1029         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Gossipico.pdf},
   1030         url = {https://bibliography.gnunet.org},
   1031         author = {Ruud van de Bovenkamp and Fernando Kuipers and Piet Van Mieghem}
   1032 }
   1033 @conference {DBLP:conf/tridentcom/HermenierR12,
   1034         title = {How to Build a Better Testbed: Lessons from a Decade of Network Experiments on Emulab},
   1035         booktitle = {TRIDENTCOM},
   1036         year = {2012},
   1037         pages = {287--304},
   1038         www_section = {emulab, emulation, testbed},
   1039         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/how-to-build-a-better-testbed.pdf},
   1040         url = {https://bibliography.gnunet.org},
   1041         author = {Fabien Hermenier and Robert Ricci}
   1042 }
   1043 @conference {2012_7,
   1044         title = {Koi: A Location-Privacy Platform for Smartphone Apps},
   1045         booktitle = {Proceedings of the 9th Symposium on Networked Systems Design and Implementation (NSDI)},
   1046         year = {2012},
   1047         month = apr,
   1048         address = {San Jose, CA},
   1049         abstract = {With mobile phones becoming first-class citizens in the online world, the rich location data they bring to the table is set to revolutionize all aspects of online life including content delivery, recommendation systems, and advertising. However, user-tracking is a concern with such location-based services, not only because location data can be linked uniquely to individuals, but because the low-level nature of current location APIs and the resulting dependence on the cloud to synthesize useful representations virtually guarantees such tracking.
   1050 In this paper, we propose privacy-preserving location-based matching as a fundamental platform primitive and as an alternative to exposing low-level, latitude-longitude (lat-long) coordinates to applications. Applications set rich location-based triggers and have these be fired based on location updates either from the local device or from a remote device (e.g., a friend's phone). Our Koi platform, comprising a privacy-preserving matching service in the cloud and a phone-based agent, realizes this primitive across multiple phone and browser platforms. By masking low-level lat-long information from applications, Koi not only avoids leaking privacy-sensitive information, it also eases the task of programmers by providing a higher-level abstraction that is easier for applications to build upon. Koi's privacy-preserving protocol prevents the cloud service from tracking users. We verify the non-tracking properties of Koi using a theorem prover, illustrate how privacy guarantees can easily be added to a wide range of location-based applications, and show that our public deployment is performant, being able to perform 12K matches per second on a single core},
   1051         www_section = {location privacy, matching},
   1052         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nsdi12-koi.pdf},
   1053         url = {https://bibliography.gnunet.org},
   1054         author = {Saikat Guha and Mudit Jain and Venkata Padmanabhan}
   1055 }
   1056 @conference {oakland2012-lap,
   1057         title = {LAP: Lightweight Anonymity and Privacy},
   1058         booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy},
   1059         year = {2012},
   1060         month = may,
   1061         publisher = {IEEE Computer Society},
   1062         organization = {IEEE Computer Society},
   1063         address = {San Francisco, CA, USA},
   1064         abstract = {Popular anonymous communication systems often require sending packets through a sequence of relays on dilated paths for strong anonymity protection. As a result, increased end-to-end latency renders such systems inadequate for the majority of Internet users who seek an intermediate level of anonymity protection while using latency-sensitive applications, such as Web applications. This paper serves to bridge the gap between communication systems that provide strong anonymity protection but with intolerable latency and non-anonymous communication systems by considering a new design space for the setting. More specifically, we explore how to achieve near-optimal latency while achieving an intermediate level of anonymity with a weaker yet practical adversary model (i.e., protecting an end-host's identity and location from servers)
   1065 such that users can choose between the level of anonymity and usability. We propose Lightweight Anonymity and Privacy (LAP), an efficient network-based solution featuring lightweight path establishment and stateless communication, by concealing an end-host's topological location to enhance anonymity against
   1066 remote tracking. To show practicality, we demonstrate that LAP can work on top of the current Internet and proposed future Internet architectures},
   1067         www_section = {anonymous communication, anonymity protection, LAP},
   1068         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LAP\%3A\%20Lightweight\%20Anonymity\%20and\%20Privacy.pdf},
   1069         url = {https://bibliography.gnunet.org},
   1070         author = {Hsu-Chun Hsiao and Tiffany Hyun-Jin Kim and Adrian Perrig and Akira Yamada and Sam Nelson and Marco Gruteser and Wei Ming}
   1071 }
   1072 @conference {oakland2012-lastor,
   1073         title = {LASTor: A Low-Latency AS-Aware Tor Client},
   1074         booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy},
   1075         year = {2012},
   1076         month = may,
   1077         publisher = {IEEE Computer Society},
   1078         organization = {IEEE Computer Society},
   1079         address = {San Francisco, CA, USA},
   1080         abstract = {The widely used Tor anonymity network is designed
   1081 to enable low-latency anonymous communication. However, in
   1082 practice, interactive communication on Tor{\textemdash}which accounts for
   1083 over 90\% of connections in the Tor network [1]{\textemdash}incurs latencies
   1084 over 5x greater than on the direct Internet path. In addition, since
   1085 path selection to establish a circuit in Tor is oblivious to Internet
   1086 routing, anonymity guarantees can break down in cases where an
   1087 autonomous system (AS) can correlate traffic across the entry
   1088 and exit segments of a circuit.
   1089 In this paper, we show that both of these shortcomings in Tor
   1090 can be addressed with only client-side modifications, i.e., without
   1091 requiring a revamp of the entire Tor architecture. To this end,
   1092 we design and implement a new Tor client, LASTor. First, we
   1093 show that LASTor can deliver significant latency gains over the
   1094 default Tor client by simply accounting for the inferred locations
   1095 of Tor relays while choosing paths. Second, since the preference
   1096 for low latency paths reduces the entropy of path selection,
   1097 we design LASTor's path selection algorithm to be tunable. A
   1098 user can choose an appropriate tradeoff between latency and
   1099 anonymity by specifying a value between 0 (lowest latency) and
   1100 1 (highest anonymity) for a single parameter. Lastly, we develop
   1101 an efficient and accurate algorithm to identify paths on which
   1102 an AS can correlate traffic between the entry and exit segments.
   1103 This algorithm enables LASTor to avoid such paths and improve a
   1104 user's anonymity, while the low runtime of the algorithm ensures
   1105 that the impact on end-to-end latency of communication is low.
   1106 By applying our techniques to measurements of real Internet
   1107 paths and by using LASTor to visit the top 200 websites from
   1108 several geographically-distributed end-hosts, we show that, in
   1109 comparison to the default Tor client, LASTor reduces median
   1110 latencies by 25\% while also reducing the false negative rate of
   1111 not detecting a potential snooping AS from 57\% to 11\%},
   1112         www_section = {anonymous communication, as, autonomous system, Tor},
   1113         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LASTor\%3A\%20A\%20Low-Latency\%20AS-Aware\%20Tor\%20Client.pdf},
   1114         url = {https://bibliography.gnunet.org},
   1115         author = {Masoud Akhoondi and Curtis Yu and Harsha V. Madhyastha}
   1116 }
   1117 @book {2012_8,
   1118         title = {Lower Bounds in Differential Privacy},
   1119         booktitle = {Theory of Cryptography},
   1120         series = {Lecture Notes in Computer Science},
   1121         volume = {7194},
   1122         year = {2012},
   1123         pages = {321--338},
   1124         publisher = {Springer Berlin Heidelberg},
   1125         organization = {Springer Berlin Heidelberg},
   1126         abstract = {This paper is about private data analysis, in which a trusted curator holding a confidential database responds to real vector-valued queries. A common approach to ensuring privacy for the database elements is to add appropriately generated random noise to the answers, releasing only these noisy responses. A line of study initiated in [7] examines the amount of distortion needed to prevent privacy violations of various kinds. The results in the literature vary according to several parameters, including the size of the database, the size of the universe from which data elements are drawn, the {\textquotedblleft}amount{\textquotedblright} of privacy desired, and for the purposes of the current work, the arity of the query. In this paper we sharpen and unify these bounds. Our foremost result combines the techniques of Hardt and Talwar [11] and McGregor et al. [13] to obtain linear lower bounds on distortion when providing differential privacy for a (contrived) class of low-sensitivity queries. (A query has low sensitivity if the data of a single individual has small effect on the answer.) Several structural results follow as immediate corollaries:
   1127 We separate so-called counting queries from arbitrary low-sensitivity queries, proving the latter requires more noise, or distortion, than does the former;
   1128 We separate ({\epsilon},0)-differential privacy from its well-studied relaxation ({\epsilon},{\delta})-differential privacy, even when ${\delta} \in 2^{-o(n)}$ is negligible in the size n of the database, proving the latter requires less distortion than the former;
   1129 We demonstrate that ({\epsilon},{\delta})-differential privacy is much weaker than ({\epsilon},0)-differential privacy in terms of mutual information of the transcript of the mechanism with the database, even when ${\delta} \in 2^{-o(n)}$ is negligible in the size n of the database.
   1130 We also simplify the lower bounds on noise for counting queries in [11] and also make them unconditional. Further, we use a characterization of ({\epsilon},{\delta}) differential privacy from [13] to obtain lower bounds on the distortion needed to ensure ({\epsilon},{\delta})-differential privacy for {\epsilon},{\delta} > 0. We next revisit the LP decoding argument of [10] and combine it with a recent result of Rudelson [15] to improve on a result of Kasiviswanathan et al. [12] on noise lower bounds for privately releasing l-way marginals},
   1131         www_section = {Differential Privacy, LP decoding},
   1132         isbn = {978-3-642-28913-2},
   1133         doi = {10.1007/978-3-642-28914-9_18},
   1134         url = {http://dx.doi.org/10.1007/978-3-642-28914-9_18},
   1135         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LowerBoundsDP2012De.pdf},
   1136         author = {De, Anindya},
   1137         editor = {Cramer, Ronald}
   1138 }
   1139 @article {rossi2012modelnet,
   1140         title = {ModelNet-TE: An emulation tool for the study of P2P and traffic engineering interaction dynamics},
   1141         journal = {Peer-to-Peer Networking and Applications},
   1142         year = {2012},
   1143         pages = {1--19},
   1144         publisher = {Springer},
   1145         www_section = {emulation, ModelNet, P2P emulation, traffic engineering},
   1146         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/modelnet-si-ppna11.pdf},
   1147         url = {https://bibliography.gnunet.org},
   1148         author = {Rossi, D. and Veglia, P. and Sammarco, M. and Larroca, F.}
   1149 }
   1150 @mastersthesis {2012_9,
   1151         title = {Monkey: Automated debugging of deployed distributed systems},
   1152         volume = {M.S},
   1153         year = {2012},
   1154         month = jul,
   1155         pages = {0--78},
   1156         school = {Technische Universit{\"a}t M{\"u}nchen},
   1157         type = {Masters},
   1158         address = {Garching bei M{\"u}nchen},
   1159         abstract = {Debugging is tedious and time consuming work that, for certain types of bugs, can and should be automated. Debugging distributed systems is more complex due to time dependencies between interacting processes. Another related problem is duplicate bug reports in bug repositories. Finding bug duplicates is hard and wastes developers' time which may affect the development team's rate of bug fixes and new releases.
   1160 In this master thesis we introduce Monkey, a new tool that provides a solution for automated classification, investigation and characterization of bugs, as well as a solution for comparing bug reports and avoiding duplicates. Our tool is particularly suitable for distributed systems due to its autonomy. We present Monkey's key design goals and architecture and give experimental results demonstrating the viability of our approach},
   1161         www_section = {automation, debugging, distributed systems},
   1162         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/safey-thesis-monkey.pdf , https://git.gnunet.org/bibliography.git/plain/docs/safey-presentation-monkey.pdf},
   1163         url = {https://bibliography.gnunet.org},
   1164         author = {Safey A. Halim}
   1165 }
   1166 @conference {2012_10,
   1167         title = {NTALG--TCP NAT traversal with application-level gateways},
   1168         booktitle = {Consumer Communications and Networking Conference (CCNC), 2012 IEEE},
   1169         year = {2012},
   1170         abstract = {Consumer computers or home communication devices are usually connected to the Internet via a Network Address Translation (NAT) router. This imposes restrictions for networking applications that require inbound connections.
   1171 Existing solutions for NAT traversal can remedy the restrictions, but still there is a fraction of home users which lack support of it, especially when it comes to TCP. We present a framework
   1172 for traversing NAT routers by exploiting their built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based applications. While this does not work in every scenario, it
   1173 significantly improves the success chance without requiring any user interaction at all. To demonstrate the framework, we show
   1174 a small test setup with laptop computers and home NAT routers},
   1175         www_section = {FTP-ALG, NAT},
   1176         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WHW_12-NTALG.pdf},
   1177         url = {https://bibliography.gnunet.org},
   1178         author = {Wander, M. and Holzapfel, S. and Wacker, A. and Weis, T.}
   1179 }
   1180 @article {2012_11,
   1181         title = {Octopus: A Secure and Anonymous DHT Lookup},
   1182         journal = {CoRR},
   1183         volume = {abs/1203.2668},
   1184         year = {2012},
   1185         www_section = {anonymity, distributed hash table},
   1186         url = {http://dblp.uni-trier.de/db/journals/corr/corr1203.html$\#$abs-1203-2668},
   1187         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/octopus_dht.pdf},
   1188         author = {Wang, Qiyan and Borisov, Nikita}
   1189 }
   1190 @conference {oakland2012-peekaboo,
   1191         title = {Peek-a-Boo, I Still See You: Why Efficient Traffic Analysis Countermeasures Fail},
   1192         booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy},
   1193         year = {2012},
   1194         month = may,
   1195         publisher = {IEEE Computer Society},
   1196         organization = {IEEE Computer Society},
   1197         address = {San Francisco, CA, USA},
   1198         abstract = {We consider the setting of HTTP traffic over encrypted tunnels, as used to conceal the identity of websites visited by a user. It is well known that traffic analysis (TA) attacks can accurately identify the website a user visits despite the use of encryption, and previous work has looked at specific attack/countermeasure pairings. We provide the first comprehensive analysis of general-purpose TA countermeasures. We show that nine known countermeasures are vulnerable to simple attacks that exploit coarse features of traffic (e.g., total time and bandwidth). The considered countermeasures
   1199 include ones like those standardized by TLS, SSH, and IPsec, and even more complex ones like the traffic morphing scheme of Wright et al. As just one of our results, we show that despite the use of traffic morphing, one can use only
   1200 total upstream and downstream bandwidth to identify {\textemdash}with 98\% accuracy{\textemdash} which of two websites was visited. One implication of what we find is that, in the context of website identification, it is unlikely that bandwidth-efficient, general-
   1201 purpose TA countermeasures can ever provide the type of security targeted in prior work},
   1202         www_section = {encrypted traffic, machine learning, padding, privacy, traffic analysis countermeasures},
   1203         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Peek-a-Boo\%2C\%20I\%20Still\%20See\%20You\%3A\%20Why\%20Efficient\%20Traffic\%20Analysis\%20Countermeasures\%20Fail.pdf},
   1204         url = {https://bibliography.gnunet.org},
   1205         author = {Kevin P. Dyer and Scott Coull and Thomas Ristenpart and Thomas Shrimpton}
   1206 }
   1207 @article {2012_12,
   1208         title = {Personalization and privacy: a survey of privacy risks and remedies in personalization-based systems},
   1209         journal = {User Modeling and User-Adapted Interaction},
   1210         volume = {22},
   1211         year = {2012},
   1212         pages = {203--220},
   1213         abstract = {Personalization technologies offer powerful tools for enhancing the user experience in a wide variety of systems, but at the same time raise new privacy concerns. For example, systems that personalize advertisements according to the physical location of the user or according to the user's friends' search history, introduce new privacy risks that may discourage wide adoption of personalization technologies. This article analyzes the privacy risks associated with several current and prominent personalization trends, namely social-based personalization, behavioral profiling, and location-based personalization. We survey user attitudes towards privacy and personalization, as well as technologies that can help reduce privacy risks. We conclude with a discussion that frames risks and technical solutions in the intersection between personalization and privacy, as well as areas for further investigation. This framework can help designers and researchers to contextualize privacy challenges of solutions when designing personalization systems},
   1214         www_section = {e-commerce, Human--computer interaction, Location-based services, Personalization, privacy, social networks},
   1215         issn = {0924-1868},
   1216         doi = {10.1007/s11257-011-9110-z},
   1217         url = {http://dx.doi.org/10.1007/s11257-011-9110-z},
   1218         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Personalization2012Toch.pdf},
   1219         author = {Toch, Eran and Wang, Yang and Cranor, Lorrie Faith}
   1220 }
   1221 @conference {DBLP:conf/focs/DworkNV12,
   1222         title = {The Privacy of the Analyst and the Power of the State},
   1223         booktitle = {FOCS 2012},
   1224         year = {2012},
   1225         pages = {400--409},
   1226         www_section = unsorted,
   1227         url = {https://bibliography.gnunet.org},
   1228         author = {Cynthia Dwork and Moni Naor and Salil P. Vadhan}
   1229 }
   1230 @phdthesis {moin:tel-00724121,
   1231         title = {Recommendation and Visualization Techniques for Large Scale Data},
   1232         year = {2012},
   1233         month = {July},
   1234         school = {Universit{\'e} Rennes 1},
   1235         type = {phd},
   1236         www_section = unsorted,
   1237         url = {https://bibliography.gnunet.org},
   1238         author = {Moin, Afshin}
   1239 }
   1240 @article {handigol2012reproducible,
   1241         title = {Reproducible network experiments using container based emulation},
   1242         journal = {Proc. CoNEXT},
   1243         year = {2012},
   1244         www_section = {emulation, mininet, network, virtualization},
   1245         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mininet-hifi.pdf},
   1246         url = {https://bibliography.gnunet.org},
   1247         author = {Handigol, N. and Heller, B. and Jeyakumar, V. and Lantz, B. and McKeown, N.}
   1248 }
   1249 @article {2012_13,
   1250         title = {Saturn: Range Queries, Load Balancing and Fault Tolerance in DHT Data Systems},
   1251         journal = {IEEE Transactions on Knowledge and Data Engineering},
   1252         volume = {24},
   1253         year = {2012},
   1254         month = jul,
   1255         chapter = {1313},
   1256         abstract = {In this paper, we present Saturn, an overlay architecture for large-scale data networks maintained over Distributed Hash
   1257 Tables (DHTs) that efficiently processes range queries and ensures access load balancing and fault-tolerance. Placing consecutive
   1258 data values in neighboring peers is desirable in DHTs since it accelerates range query processing; however, such a placement is highly
   1259 susceptible to load imbalances. At the same time, DHTs may be susceptible to node departures/failures and high data availability and
   1260 fault tolerance are significant issues. Saturn deals effectively with these problems through the introduction of a novel multiple ring,
   1261 order-preserving architecture. The use of a novel order-preserving hash function ensures fast range query processing. Replication
   1262 across and within data rings (termed vertical and horizontal replication) forms the foundation over which our mechanisms are
   1263 developed, ensuring query load balancing and fault tolerance, respectively. Our detailed experimentation study shows strong gains in
   1264 range query processing efficiency, access load balancing, and fault tolerance, with low replication overheads. The significance of
   1265 Saturn is not only that it effectively tackles all three issues together{\textemdash}i.e., supporting range queries, ensuring load balancing, and
   1266 providing fault tolerance over DHTs{\textemdash}but also that it can be applied on top of any order-preserving DHT enabling it to dynamically
   1267 handle replication and, thus, to trade off replication costs for fair load distribution and fault tolerance},
   1268         www_section = {distributed hash table, load balancing, range queries, Saturn},
   1269         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saturn-range-dht.pdf},
   1270         url = {https://bibliography.gnunet.org},
   1271         author = {Theoni Pitoura and Nikos Ntarmos and Peter Triantafillou}
   1272 }
   1273 @article {2012_14,
   1274         title = {The state-of-the-art in personalized recommender systems for social networking},
   1275         journal = {Artificial Intelligence Review},
   1276         volume = {37},
   1277         year = {2012},
   1278         pages = {119--132},
   1279         abstract = {With the explosion of Web 2.0 applications such as blogs, social and professional networks, and various other types of social media, the rich online information and various new sources of knowledge flood users and hence pose a great challenge in terms of information overload. It is critical to use intelligent agent software systems to assist users in finding the right information from an abundance of Web data. Recommender systems can help users deal with the information overload problem efficiently by suggesting items (e.g., information and products) that match users' personal interests. The recommender technology has been successfully employed in many applications such as recommending films, music, books, etc. The purpose of this report is to give an overview of existing technologies for building personalized recommender systems in social networking environment, to propose a research direction for addressing user profiling and cold start problems by exploiting user-generated content newly available in Web 2.0},
   1280         www_section = {recommender systems, Social networking, trust, User generated content, user profiles},
   1281         issn = {0269-2821},
   1282         doi = {10.1007/s10462-011-9222-1},
   1283         url = {http://dx.doi.org/10.1007/s10462-011-9222-1},
   1284         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedRecommender2012Zhou.pdf},
   1285         author = {Zhou, Xujuan and Xu, Yue and Li, Yuefeng and J{\o}sang, Audun and Cox, Clive}
   1286 }
   1287 @article {2012_15,
   1288         title = {A Survey of Monte Carlo Tree Search Methods},
   1289         journal = {IEEE Transactions on Computational Intelligence and AI in Games},
   1290         volume = {4},
   1291         year = {2012},
   1292         month = mar,
   1293         pages = {1--43},
   1294         abstract = {Monte Carlo tree search (MCTS) is a recently proposed search method that combines the precision of tree search with the generality of random sampling. It has received considerable interest due to its spectacular success in the difficult problem of computer Go, but has also proved beneficial in a range of other domains. This paper is a survey of the literature to date, intended to provide a snapshot of the state of the art after the first five years of MCTS research. We outline the core algorithm's derivation, impart some structure on the many variations and enhancements that have been proposed, and summarize the results from the key game and nongame domains to which MCTS methods have been applied. A number of open research questions indicate that the field is ripe for future work},
   1295         www_section = {AI, artificial intelligence, bandit-based methods, computer go, game search, MCTS, monte carlo tree search, UCB, UCT, upper confidence bounds, upper confidence bounds for trees},
   1296         issn = {1943-068X},
   1297         doi = {10.1109/TCIAIG.2012.2186810},
   1298         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Browne\%20et\%20al\%20-\%20A\%20survey\%20of\%20MCTS\%20methods.pdf},
   1299         url = {https://bibliography.gnunet.org},
   1300         author = {Cameron Browne and Edward Powley and Daniel Whitehouse and Simon Lucas and Peter I. Cowling and Philipp Rohlfshagen and Stephen Tavener and Diego Perez and Spyridon Samothrakis and Simon Colton}
   1301 }
   1302 @article {2012_16,
   1303         title = {Theory and Practice of Bloom Filters for Distributed Systems},
   1304         journal = {Communications Surveys Tutorials, IEEE},
   1305         volume = {14},
   1306         year = {2012},
   1307         month = jan,
   1308         pages = {131--155},
   1309         abstract = {Many network solutions and overlay networks utilize probabilistic techniques to reduce information processing and networking costs. This survey article presents a number of frequently used and useful probabilistic techniques. Bloom filters and their variants are of prime importance, and they are heavily used in various distributed systems. This has been reflected in recent research and many new algorithms have been proposed for distributed systems that are either directly or indirectly based on Bloom filters. In this survey, we give an overview of the basic and advanced techniques, reviewing over 20 variants and discussing their application in distributed systems, in particular for caching, peer-to-peer systems, routing and forwarding, and measurement data summarization},
   1310         www_section = {Arrays, Bismuth, bloom filters, distributed systems, Filtering theory, filters, Fingerprint recognition, forwarding, information processing, measurement data summarization, networking costs, overlay networks, Peer to peer computing, peer-to-peer computing, Peer-to-peer systems, Probabilistic logic, probabilistic structures, probabilistic techniques, probability, routing, telecommunication network routing},
   1311         issn = {1553-877X},
   1312         doi = {10.1109/SURV.2011.031611.00024},
   1313         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TheoryandPracticeBloomFilter2011Tarkoma.pdf},
   1315         url = {https://bibliography.gnunet.org},
   1316         author = {Tarkoma, S. and Rothenberg, C.E. and Lagerspetz, E.}
   1317 }
   1318 @conference {2012_17,
   1319         title = {User Interests Driven Web Personalization Based on Multiple Social Networks},
   1320         booktitle = {Proceedings of the 4th International Workshop on Web Intelligence \& Communities},
   1321         year = {2012},
   1322         publisher = {ACM},
   1323         organization = {ACM},
   1324         address = {New York, NY, USA},
   1325         abstract = {User related data indicate user interests in a certain environment. In the context of massive data from the Web, if an application wants to provide more personalized service (e.g. search) for users, an investigation on user interests is needed. User interests are usually distributed in different sources. In order to provide a more comprehensive understanding, user related data from multiple sources need to be integrated together for deeper analysis. Web based social networks have become typical platforms for extracting user interests. In addition, there are various types of interests from these social networks. In this paper, we provide an algorithmic framework for retrieving semantic data based on user interests from multiple sources (such as multiple social networking sites). We design several algorithms to deal with interests based retrieval based on single and multiple types of interests. We utilize publication data from Semantic Web Dog Food (which can be considered as an academic collaboration based social network), and microblogging data from Twitter to validate our framework. The Active Academic Visit Recommendation Application (AAVRA) is developed as a concrete usecase to show the potential effectiveness of the proposed framework for user interests driven Web personalization based on multiple social networks},
   1326         www_section = {interest analysis, search refinement, web personalization},
   1327         isbn = {978-1-4503-1189-2},
   1328         doi = {10.1145/2189736.2189749},
   1329         url = {http://doi.acm.org/10.1145/2189736.2189749},
   1330         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WebPersonalization2012Zeng.pdf},
   1331         author = {Zeng, Yi and Zhong, Ning and Ren, Xu and Wang, Yan}
   1332 }
   1333 @conference {pets2011-bagai,
   1334         title = {An Accurate System-Wide Anonymity Metric for Probabilistic Attacks},
   1335         booktitle = {PETS'11--Proceedings of the 11th Privacy Enhancing Technologies Symposium},
   1336         year = {2011},
   1337         month = jul,
   1338         address = {Waterloo, Canada},
   1339         abstract = {We give a critical analysis of the system-wide anonymity metric of Edman et al. [3], which is based on the permanent value of a doubly-stochastic matrix. By providing an intuitive understanding of the permanent of such a matrix, we show that a metric that looks no further than this composite value is at best a rough indicator of anonymity. We identify situations where its inaccuracy is acute, and reveal a better anonymity indicator. Also, by constructing an information-preserving embedding of a smaller class of attacks into the wider class for which this metric was proposed, we show that this metric fails to possess desirable
   1340 generalization properties. Finally, we present a new anonymity metric that does not exhibit these shortcomings. Our new metric is accurate as well as general},
   1341         www_section = {combinatorial matrix theory, probabilistic attacks, system-wide anonymity metric},
   1342         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PETS\%2711\%20-\%20An\%20Accurate\%20System-Wide\%20Anonymity\%20Metric\%20for\%20Probabilistic\%20Attacks.pdf},
   1343         url = {https://bibliography.gnunet.org},
   1344         author = {Rajiv Bagai and Huabo Lu and Rong Li and Bin Tang}
   1345 }
   1346 @conference {cset2011evans,
   1347         title = {Beyond Simulation: Large-Scale Distributed Emulation of P2P Protocols},
   1348         booktitle = {4th Workshop on Cyber Security Experimentation and Test (CSET 2011)},
   1349         year = {2011},
   1350         publisher = {USENIX Association},
   1351         organization = {USENIX Association},
   1352         address = {San Francisco, California},
   1353         abstract = {This paper presents details on the design and implementation of a scalable framework for evaluating peer-to-peer protocols.  Unlike systems based on simulation, emulation-based systems enable the experimenter to obtain data that reflects directly on the concrete implementation in much greater detail.  This paper argues that emulation is a better model for experiments with peer-to-peer protocols since it can provide scalability and high flexibility while eliminating the cost of moving from experimentation to  deployment.  We discuss our unique experience with large-scale emulation using the GNUnet peer-to-peer framework and provide experimental results to support these claims },
   1354         www_section = {distributed hash table, emulation, GNUnet, scalability, security analysis},
   1355         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cset2011.pdf},
   1356         url = {https://bibliography.gnunet.org},
   1357         author = {Nathan S Evans and Christian Grothoff}
   1358 }
   1359 @conference {bnymble11,
   1360         title = {BNymble: More anonymous blacklisting at almost no cost},
   1361         booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security},
   1362         year = {2011},
   1363         month = feb,
   1364         address = {St. Lucia},
   1365         abstract = {Anonymous blacklisting schemes allow online service providers to prevent future anonymous access by abusive users while preserving the privacy of all anonymous users (both abusive and non-abusive). The first scheme proposed for this purpose was Nymble, an extremely efficient scheme based only on symmetric primitives; however, Nymble relies on trusted third parties who can collude to de-anonymize users of the scheme. Two recently proposed schemes, Nymbler and Jack, reduce the trust placed in these third parties at the expense of using less-efficient asymmetric crypto primitives. We present BNymble, a scheme which matches the anonymity guarantees of Nymbler and Jack while (nearly) maintaining the efficiency of the original Nymble. The key insight of
   1366 BNymble is that we can achieve the anonymity goals of these more recent schemes by replacing only the infrequent {\textquotedblleft}User Registration{\textquotedblright} protocol from Nymble with asymmetric primitives. We prove the security of BNymble, and report on its efficiency},
   1367         www_section = {anonymous access, anonymous blacklisting, BNymble},
   1368         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20BNymble.pdf},
   1369         url = {https://bibliography.gnunet.org},
   1370         author = {Peter Lofgren and Nicholas J. Hopper}
   1371 }
   1372 @conference {wpes11-bridgespa,
   1373         title = {BridgeSPA: Improving Tor Bridges with Single Packet Authorization},
   1374         booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society},
   1375         year = {2011},
   1376         month = oct,
   1377         publisher = {ACM},
   1378         organization = {ACM},
   1379         address = {Chicago, IL, United States},
   1380         abstract = {Tor is a network designed for low-latency anonymous communications. Tor clients form circuits through relays that are listed in a public directory, and then relay their encrypted traffic through these circuits. This indirection makes it difficult for a local adversary to determine with whom a particular Tor user is communicating. In response, some local adversaries restrict access to Tor by blocking each of the publicly listed relays. To deal with such an adversary, Tor uses bridges, which are unlisted relays that can be used as alternative entry points into the Tor network. Unfortunately, issues with Tor's bridge implementation make it easy to discover large numbers of bridges. An adversary that hoards this information may use it to determine when each bridge is online over time. If a bridge operator also browses with Tor on the same machine, this information may be sufficient to deanonymize him. We present BridgeSPA as a method to mitigate this issue. A client using BridgeSPA relies on innocuous single packet authorization (SPA) to present a time-limited key to a bridge. Before this authorization takes place, the bridge will not reveal whether it is online. We have implemented BridgeSPA as a working proof-of-concept, which is available under an open-source licence},
   1381         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20bridgeSPA.pdf},
   1382         www_section = unsorted,
   1383         url = {https://bibliography.gnunet.org},
   1384         author = {Rob Smits and Divam Jain and Sarah Pidcock and Ian Goldberg and Urs Hengartner}
   1385 }
   1386 @conference {ccs2011-cirripede,
   1387         title = {Cirripede: Circumvention Infrastructure using Router Redirection with Plausible Deniability},
   1388         booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and Communications Security},
   1389         year = {2011},
   1390         month = oct,
   1391         publisher = {ACM},
   1392         organization = {ACM},
   1393         address = {Chicago, IL, United States},
   1394         abstract = {Many users face surveillance of their Internet communications and a significant fraction suffer from outright blocking of certain destinations. Anonymous communication systems allow users to conceal the destinations they communicate with, but do not hide the fact that the users are using them. The mere use of such systems may invite suspicion, or access to them may be blocked.
   1395 We therefore propose Cirripede, a system that can be used for unobservable communication with Internet destinations. Cirripede is designed to be deployed by ISPs; it intercepts connections from clients to innocent-looking destinations and redirects them to the true destination requested by the client. The communication is encoded in a way that is indistinguishable from normal communications to anyone without the master secret key, while public-key cryptography is used to eliminate the need for any secret information that must be shared with Cirripede users.
   1396 Cirripede is designed to work scalably with routers that handle large volumes of traffic while imposing minimal overhead on ISPs and not disrupting existing traffic. This allows Cirripede proxies to be strategically deployed at central locations, making access to Cirripede very difficult to block. We built a proof-of-concept implementation of Cirripede and performed a testbed evaluation of its performance properties},
   1397         www_section = {censorship-resistance, unobservability},
   1398         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Cirripede.pdf},
   1399         url = {https://bibliography.gnunet.org},
   1400         author = {Amir Houmansadr and Giang T. K. Nguyen and Matthew Caesar and Borisov, Nikita}
   1401 }
   1402 @article {2011_0,
   1403         title = {Collaborative Personalized Top-k Processing},
   1404         journal = {ACM Trans. Database Syst},
   1405         volume = {36},
   1406         year = {2011},
   1407         pages = {26:1--26:38},
   1408         abstract = {This article presents P4Q, a fully decentralized gossip-based protocol to personalize query processing in social tagging systems. P4Q dynamically associates each user with social acquaintances sharing similar tagging behaviors. Queries are gossiped among such acquaintances, computed on-the-fly in a collaborative, yet partitioned manner, and results are iteratively refined and returned to the querier. Analytical and experimental evaluations convey the scalability of P4Q for top-k query processing, as well as its inherent ability to cope with users updating profiles and departing},
   1409         www_section = {gossip, Peer-to-peer networks, Personalization, top-k processing},
   1410         issn = {0362-5915},
   1411         doi = {10.1145/2043652.2043659},
   1412         url = {http://doi.acm.org/10.1145/2043652.2043659},
   1413         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TopK-Processing2011Bai.pdf},
   1414         author = {Bai, Xiao and Guerraoui, Rachid and Kermarrec, Anne-Marie and Leroy, Vincent}
   1415 }
   1416 @article {2011_1,
   1417         title = {A comprehensive study of Convergent and Commutative Replicated Data Types},
   1418         number = {7506},
   1419         year = {2011},
   1420         month = jan,
   1421         institution = {INRIA Rocquencourt},
   1422         address = {Le Chesnay Cedex},
   1423         abstract = {Eventual consistency aims to ensure that replicas of some mutable shared
   1424 object converge without foreground synchronisation. Previous approaches to eventual
   1425 consistency are ad-hoc and error-prone. We study a principled approach: to base the design of
   1426 shared data types on some simple formal conditions that are sufficient to guarantee
   1427 eventual consistency. We call these types Convergent or Commutative Replicated Data Types
   1428 (CRDTs). This paper formalises asynchronous object replication, either state based or
   1429 operation based, and provides a sufficient condition appropriate for each case. It describes
   1430 several useful CRDTs, including container data types supporting both add and remove
   1431 operations with clean semantics, and more complex types such as graphs, monotonic DAGs,
   1432 and sequences. It discusses some properties needed to implement non-trivial CRDTs},
   1433         www_section = {commutative operations, data replication, optimistic replication},
   1434         journal = {unknown},
   1435         isbn = {0249-6399},
   1436         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crdt.pdf},
   1437         url = {https://bibliography.gnunet.org},
   1438         author = {Marc Shapiro and Nuno Preguica and Carlos Baquero and Marek Zawirski}
   1439 }
   1440 @conference {2011_2,
   1441         title = {Considering Complex Search Techniques in DHTs under Churn},
   1442         booktitle = {CCNC 2011--IEEE Consumer Communications and Networking Conference},
   1443         year = {2011},
   1444         month = jan,
   1445         publisher = {IEEE Computer Society},
   1446         organization = {IEEE Computer Society},
   1447         address = {Las Vegas, NV, USA},
   1448         abstract = {Traditionally complex queries have been performed over unstructured P2P networks by means of flooding, which is inherently inefficient due to the large number of redundant messages generated. While Distributed Hash Tables (DHTs) can provide very efficient look-up operations, they traditionally do not provide any methods for complex queries. By exploiting the structure inherent in DHTs we can perform complex querying over structured P2P networks by means of efficiently broadcasting the search query. This allows every node in the network to process the query locally, and hence is as powerful and flexible as flooding in unstructured networks, but without the inefficiency of redundant messages. While there have been various approaches proposed for broadcasting search queries over DHTs, the focus has not been on validation under churn. Comparing blind search methods for DHTs through simulation we see that churn, in particular nodes leaving the network, has a large impact on query success rate. In this paper we present novel results comparing blind search over Chord and Pastry while under varying levels of churn. We further consider how different data replication strategies can be used to enhance the query success rate},
   1449         www_section = {churn, complex queries, distributed hash table, search techniques},
   1450         url = {https://bibliography.gnunet.org},
   1451         isbn = {978-1-4244-8789-9},
   1452         doi = {10.1109/CCNC.2011.5766542},
   1453         author = {Jamie Furness and Mario Kolberg}
   1454 }
   1455 @conference {foci11-decoy,
   1456         title = {Decoy Routing: Toward Unblockable Internet Communication},
   1457         booktitle = {FOCI'11--Proceedings of the USENIX Workshop on Free and Open Communications on the Internet},
   1458         year = {2011},
   1459         month = aug,
   1460         address = {San Francisco, CA, USA},
   1461         abstract = {We present decoy routing, a mechanism capable of circumventing common network filtering strategies. Unlike other circumvention techniques, decoy routing does not require a client to connect to a specific IP address (which
   1462 is easily blocked) in order to provide circumvention. We show that if it is possible for a client to connect to any unblocked host/service, then decoy routing could be used to connect them to a blocked destination without cooperation from the host. This is accomplished by placing the circumvention service in the network itself -- where a single device could proxy traffic between a significant fraction of hosts -- instead of at the edge},
   1463         www_section = {decoy routing, Internet communication, network filter},
   1464         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FOCI\%2711\%20-\%20Decoy\%20Routing\%3A\%20Toward\%20Unblockable\%20Internet\%20Communication.pdf},
   1465         url = {https://bibliography.gnunet.org},
   1466         author = {Josh Karlin and Daniel Ellard and Alden W. Jackson and Christine E. Jones and Greg Lauer and David P. Mankins and W. Timothy Strayer}
   1467 }
   1468 @conference {pets2011-defenestrator,
   1469         title = {DefenestraTor: Throwing out Windows in Tor},
   1470         booktitle = {PETS'11--Proceedings of the 11th Privacy Enhancing Technologies Symposium},
   1471         year = {2011},
   1472         month = jul,
   1473         address = {Waterloo, Canada},
   1474         abstract = {Tor is one of the most widely used privacy enhancing technologies for achieving online anonymity and resisting censorship. While conventional wisdom dictates that the level of anonymity offered by Tor increases as its user base grows, the most significant obstacle to Tor adoption continues to be its slow performance. We seek to enhance Tor's performance by offering techniques to control congestion and improve flow control, thereby reducing unnecessary delays. To reduce congestion, we first evaluate small fixed-size circuit windows and a dynamic circuit window that adaptively re-sizes in response to perceived congestion. While these solutions improve web page response times and require modification only to exit routers, they generally offer poor flow control and slower downloads relative to Tor's current design. To improve flow control while reducing congestion, we implement N23, an ATM-style per-link algorithm that allows Tor routers to explicitly cap their queue lengths and signal congestion via back-pressure. Our results show that N23 offers better congestion and flow control, resulting in improved web page response times and faster page loads compared to Tor's current design and other window-based approaches. We also argue that our proposals do not enable any new attacks on Tor users' privacy},
   1475         www_section = {congestion, DefenestraTor, online anonymity, performance, privacy enhancing technologies, Tor, Windows},
   1476         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PETS\%2711\%20-\%20DefenestraTor.pdf},
   1477         url = {https://bibliography.gnunet.org},
   1478         author = {Mashael AlSabah and Kevin Bauer and Ian Goldberg and Dirk Grunwald and Damon McCoy and Stefan Savage and Geoffrey M. Voelker}
   1479 }
   1480 @article {2011_3,
   1481         title = {Distributed Private Data Analysis: On Simultaneously Solving How and What},
   1482         journal = {CoRR},
   1483         volume = {abs/1103.2626},
   1484         year = {2011},
   1485         abstract = {We examine the combination of two directions in the field of privacy concerning computations over distributed private inputs--secure function evaluation (SFE) and differential privacy. While in both the goal is to privately evaluate some function of the individual inputs, the privacy requirements are significantly different. The general feasibility results for SFE suggest a natural paradigm for implementing differentially private analyses distributively: First choose what to compute, i.e., a differentially private analysis; Then decide how to compute it, i.e., construct an SFE protocol for this analysis.
   1486 We initiate an examination whether there are advantages to a paradigm where both decisions are made simultaneously. In particular, we investigate under which accuracy requirements it is beneficial to adapt this paradigm for computing a collection of functions including binary sum, gap threshold, and approximate median queries. Our results imply that when computing the binary sum of n distributed inputs then:
   1487 * When we require that the error is o({\surd}n) and the number of rounds is constant, there is no benefit in the new paradigm.
   1488 * When we allow an error of O({\surd}n), the new paradigm yields more efficient protocols when we consider protocols that compute symmetric functions.
   1489 Our results also yield new separations between the local and global models of computations for private data analysis},
   1490         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedPrivateData2008Beimel.pdf},
   1491         www_section = unsorted,
   1492         url = {https://bibliography.gnunet.org},
   1493         author = {Amos Beimel and Kobbi Nissim and Eran Omri}
   1494 }
   1495 @conference {cset11-experimentor,
   1496         title = {ExperimenTor: A Testbed for Safe and Realistic Tor Experimentation},
   1497         booktitle = {CSET'11--Proceedings of the USENIX Workshop on Cyber Security Experimentation and Test},
   1498         year = {2011},
   1499         month = aug,
   1500         address = {San Francisco, CA, USA},
   1501         abstract = {Tor is one of the most widely-used privacy enhancing technologies for achieving online anonymity and resisting censorship. Simultaneously, Tor is also an evolving research network on which investigators perform experiments to improve the network's resilience to attacks and enhance its performance. Existing methods for studying Tor have included analytical modeling, simulations, small-scale network emulations, small-scale PlanetLab deployments, and measurement and analysis of the live Tor network. Despite the growing body of work concerning Tor, there is no widely accepted methodology for conducting Tor research in a manner that preserves realism while protecting live users' privacy. In an effort to propose a standard, rigorous experimental framework for
   1502 conducting Tor research in a way that ensures safety and realism, we present the design of ExperimenTor, a large-scale Tor network emulation toolkit and testbed. We also report our early experiences with prototype testbeds currently deployed at four research institutions},
   1503         www_section = {experimentation, ExperimenTor, privacy enhancing technologies, Tor},
   1504         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSET\%2711\%20-\%20ExperimenTor.pdf},
   1505         url = {https://bibliography.gnunet.org},
   1506         author = {Kevin Bauer and Micah Sherr and Damon McCoy and Dirk Grunwald}
   1507 }
   1508 @conference {acsac11-tortoise,
   1509         title = {Exploring the Potential Benefits of Expanded Rate Limiting in Tor: Slow and Steady Wins the Race With Tortoise},
   1510         booktitle = {ACSAC'11--Proceedings of 2011 Annual Computer Security Applications Conference},
   1511         year = {2011},
   1512         month = dec,
   1513         address = {Orlando, FL, USA},
   1514         abstract = {Tor is a volunteer-operated network of application-layer relays that enables users to communicate privately and anonymously. Unfortunately, Tor often exhibits poor performance due to congestion caused by the unbalanced ratio of clients to available relays, as well as a disproportionately high consumption of network capacity by a small fraction of filesharing users.
   1515 This paper argues the very counterintuitive notion that slowing down traffic on Tor will increase the bandwidth capacity of the network and consequently improve the experience of interactive web users. We introduce Tortoise, a system for rate limiting Tor at its ingress points. We demonstrate that Tortoise incurs little penalty for interactive web users, while significantly decreasing the throughput for filesharers. Our techniques provide incentives to filesharers to configure their Tor clients to also relay traffic, which in turn improves the network's overall performance. We present large-scale emulation results that indicate that interactive users will achieve a significant speedup if even a small fraction of clients opt to run relays},
   1516         www_section = {anonymity, performance, Tor},
   1517         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2711\%20-\%20Tortoise.pdf},
   1518         url = {https://bibliography.gnunet.org},
   1519         author = {W. Brad Moore and Chris Wacek and Micah Sherr}
   1520 }
   1521 @conference {acsac11-backlit,
   1522         title = {Exposing Invisible Timing-based Traffic Watermarks with BACKLIT},
   1523         booktitle = {ACSAC'11--Proceedings of 2011 Annual Computer Security Applications Conference},
   1524         year = {2011},
   1525         month = dec,
   1526         address = {Orlando, FL, USA},
   1527         abstract = {Traffic watermarking is an important element in many network security and privacy applications, such as tracing botnet C\&C communications and deanonymizing peer-to-peer VoIP calls. The state-of-the-art traffic watermarking schemes are usually based on packet timing information and they are notoriously difficult to detect. In this paper, we show for the first time that even the most sophisticated timing-based watermarking schemes (e.g., RAINBOW and SWIRL) are not invisible by proposing a new detection system called BACKLIT. BACKLIT is designed according to the observation that any practical timing-based traffic watermark will cause noticeable alterations in the intrinsic timing features typical of TCP flows. We propose five metrics that are sufficient for detecting four state-of-the-art traffic watermarks for bulk transfer and interactive traffic. BACKLIT can be easily deployed in stepping stones and anonymity networks (e.g., Tor), because it does not rely on strong assumptions and can be realized in an active or passive mode. We have conducted extensive experiments to evaluate BACKLIT's detection performance using the PlanetLab platform. The results show that BACKLIT can detect watermarked network flows
   1528 with high accuracy and few false positives},
   1529         www_section = {BACKLIT, detection system, invisible, network security, packet timing information, privacy, traffic watermark},
   1530         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2711\%20-\%20BACKLIT.pdf},
   1531         url = {https://bibliography.gnunet.org},
   1532         author = {Xiapu Luo and Peng Zhou and Junjie Zhang and Roberto Perdisci and Wenke Lee and Rocky K. C. Chang}
   1533 }
   1534 @conference {wpes11-faust,
   1535         title = {FAUST: Efficient, TTP-Free Abuse Prevention by Anonymous Whitelisting},
   1536         booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society},
   1537         year = {2011},
   1538         month = oct,
   1539         publisher = {ACM},
   1540         organization = {ACM},
   1541         address = {Chicago, IL, United States},
   1542         abstract = {We introduce Faust, a solution to the {\textquotedblleft}anonymous blacklisting problem:{\textquotedblright} allow an anonymous user to prove that she is authorized to access an online service such that if the user misbehaves, she retains her anonymity but will be unable to
   1543 authenticate in future sessions. Faust uses no trusted third parties and is one to two orders of magnitude more efficient than previous schemes without trusted third parties. The key idea behind Faust is to eliminate the explicit blacklist used in all previous approaches, and rely instead on an implicit whitelist, based on blinded authentication tokens},
   1544         www_section = {anonymous authentication, anonymous blacklisting, privacy-enhancing revocation},
   1545         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20FAUST.pdf},
   1546         url = {https://bibliography.gnunet.org},
   1547         author = {Peter Lofgren and Nicholas J. Hopper}
   1548 }
   1549 @conference {Prusty:2011:FIO:2046707.2046731,
   1550         title = {Forensic investigation of the OneSwarm anonymous filesharing system},
   1551         booktitle = {Proceedings of the 18th ACM conference on Computer and communications security},
   1552         series = {CCS '11},
   1553         year = {2011},
   1554         pages = {201--214},
   1555         publisher = {ACM},
   1556         organization = {ACM},
   1557         address = {New York, NY, USA},
   1558         abstract = {OneSwarm is a system for anonymous p2p file sharing in use by thousands of peers. It aims to provide Onion Routing-like privacy and BitTorrent-like performance. We demonstrate several flaws in OneSwarm's design and implementation through three different attacks available to forensic investigators. First, we prove that the current design is vulnerable to a novel timing attack that allows just two attackers attached to the same target to determine if it is the source of queried content. When attackers comprise 15\% of OneSwarm peers, we expect over 90\% of remaining peers will be attached to two attackers and therefore vulnerable. Thwarting the attack increases OneSwarm query response times, making them longer than the equivalent in Onion Routing. Second, we show that OneSwarm's vulnerability to traffic analysis by colluding attackers is much greater than was previously reported, and is much worse than Onion Routing. We show for this second attack that when investigators comprise 25\% of peers, over 40\% of the network can be investigated with 80\% precision to find the sources of content. Our examination of the OneSwarm source code found differences with the technical paper that significantly reduce security. For the implementation in use by thousands of people, attackers that comprise 25\% of the network can successfully use this second attack against 98\% of remaining peers with 95\% precision. Finally, we show that a novel application of a known TCP-based attack allows a single attacker to identify whether a neighbor is the source of data or a proxy for it. Users that turn off the default rate-limit setting are exposed. Each attack can be repeated as investigators leave and rejoin the network. All of our attacks are successful in a forensics context: Law enforcement can use them legally ahead of a warrant. Furthermore, private investigators, who have fewer restrictions on their behavior, can use them more easily in pursuit of evidence for such civil suits as copyright infringement},
   1559         www_section = {anonymity, OneSwarm, p2p network},
   1560         isbn = {978-1-4503-0948-6},
   1561         doi = {http://doi.acm.org/10.1145/2046707.2046731},
   1562         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/prusty.ccs_.2011.pdf},
   1563         url = {https://bibliography.gnunet.org},
   1564         author = {Prusty, Swagatika and Brian Neil Levine and Marc Liberatore}
   1565 }
   1566 @conference {oakland11-formalizing,
   1567         title = {Formalizing Anonymous Blacklisting Systems},
   1568         booktitle = {Proceedings of the 2011 IEEE Symposium on Security and Privacy},
   1569         year = {2011},
   1570         month = may,
   1571         address = {San Francisco, CA, USA},
   1572         abstract = {Anonymous communications networks, such as Tor, help to solve the real and important problem of enabling users to communicate privately over the Internet. However, in doing so, anonymous communications networks introduce an entirely new problem for the service providers{\textemdash}such as websites, IRC networks or mail servers{\textemdash}with which these users interact; in particular, since all anonymous users look alike, there is no way for the service providers to hold individual misbehaving anonymous users accountable for their actions. Recent
   1573 research efforts have focused on using anonymous blacklisting systems (which are sometimes called anonymous revocation systems) to empower service providers with the ability to revoke access from abusive anonymous users. In contrast to revocable anonymity systems, which enable some trusted third party to deanonymize users, anonymous blacklisting systems provide users with a way to authenticate anonymously with a service provider, while enabling the service provider to revoke access from any users that misbehave, without revealing their identities. In this paper, we introduce the anonymous blacklisting
   1574 problem and survey the literature on anonymous blacklisting systems, comparing and contrasting the architecture of various existing schemes, and discussing the tradeoffs inherent with each design. The literature on anonymous blacklisting systems lacks a unified set of definitions; each scheme operates under different trust assumptions and provides different security and privacy guarantees. Therefore, before we discuss the existing approaches in detail, we first propose a formal definition for anonymous blacklisting systems, and a set of security and privacy properties that these systems should possess. We also
   1575 outline a set of new performance requirements that anonymous blacklisting systems should satisfy to maximize their potential for real-world adoption, and give formal definitions for several optional features already supported by some schemes in the literature},
   1576         www_section = {anonymity, anonymous blacklisting, authentication, privacy enhancing technologies, privacy-enhanced revocation},
   1577         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Formalizing\%20Anonymous\%20Blacklisting\%20Systems.pdf},
   1578         url = {https://bibliography.gnunet.org},
   1579         author = {Ryan Henry and Ian Goldberg}
   1580 }
   1581 @conference { grothoff2011syssec,
   1582         title = {The Free Secure Network Systems Group: Secure Peer-to-Peer Networking and Beyond},
   1583         booktitle = {SysSec 2011},
   1584         year = {2011},
   1585         address = {Amsterdam, Netherlands},
   1586         abstract = {This paper introduces the current research and future plans of the Free Secure Network Systems Group at the Technische Universit{\"a}t M{\"u}nchen.  In particular, we provide some insight into the development process and architecture of the GNUnet P2P framework and the challenges we are currently working on},
   1587         www_section = {anonymity, GNUnet, routing},
   1588         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/syssec2011.pdf},
   1589         url = {https://bibliography.gnunet.org},
   1590         author = {Christian Grothoff}
   1591 }
   1592 @article {2011_4,
   1593         title = {High-speed high-security signatures},
   1594         journal = {Journal of Cryptographic Engineering},
   1595         volume = {2},
   1596         year = {2011},
   1597         month = sep,
   1598         pages = {77--89},
   1599         chapter = {77},
   1600         www_section = {ECC, Ed25519, EdDSA, GNUnet},
   1601         url = {http://ed25519.cr.yp.to/papers.html},
   1602         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ed25519-20110926.pdf},
   1603         author = {Daniel J. Bernstein and Niels Duif and Tanja Lange and Peter Schwabe and Bo-Yin Yang}
   1604 }
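        %
        % Editor's note (illustrative, not part of the entry above): a minimal sketch of
        % Ed25519 signing and verification using the PyNaCl library, which implements the
        % signature scheme described in this paper. Message contents and variable names
        % are assumptions for illustration; remove the leading "% " to run the Python code.
        %
        %   from nacl.signing import SigningKey
        %
        %   sk = SigningKey.generate()            # fresh 32-byte Ed25519 signing key
        %   vk = sk.verify_key                    # corresponding public verification key
        %   signed = sk.sign(b"hello, gnunet")    # signature prepended to the message
        %   vk.verify(signed)                     # raises BadSignatureError on any tampering
        %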
   1605 @book {2011_5,
   1606         title = {How Much Is Enough? Choosing {\epsilon} for Differential Privacy},
   1607         booktitle = {Information Security},
   1608         series = {Lecture Notes in Computer Science},
   1609         volume = {7001},
   1610         year = {2011},
   1611         pages = {325--340},
   1612         publisher = {Springer Berlin Heidelberg},
   1613         organization = {Springer Berlin Heidelberg},
   1614         abstract = {Differential privacy is a recent notion, and while it is nice conceptually it has been difficult to apply in practice. The parameters of differential privacy have an intuitive theoretical interpretation, but the implications and impacts on the risk of disclosure in practice have not yet been studied, and choosing appropriate values for them is non-trivial. Although the privacy parameter {\epsilon} in differential privacy is used to quantify the privacy risk posed by releasing statistics computed on sensitive data, {\epsilon} is not an absolute measure of privacy but rather a relative measure. In effect, even for the same value of {\epsilon} , the privacy guarantees enforced by differential privacy are different based on the domain of attribute in question and the query supported. We consider the probability of identifying any particular individual as being in the database, and demonstrate the challenge of setting the proper value of {\epsilon} given the goal of protecting individuals in the database with some fixed probability},
   1615         www_section = {Differential Privacy, Privacy Parameter, epsilon},
   1616         isbn = {978-3-642-24860-3},
   1617         doi = {10.1007/978-3-642-24861-0_22},
   1618         url = {http://dx.doi.org/10.1007/978-3-642-24861-0_22},
   1619         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Choosing-\%CE\%B5-2011Lee.pdf},
   1620         author = {Lee, Jaewoo and Clifton, Chris},
   1621         editor = {Lai, Xuejia and Zhou, Jianying and Li, Hui}
   1622 }
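        %
        % Editor's note (illustrative sketch, not taken from the paper above): the standard
        % Laplace mechanism, showing how the privacy parameter epsilon discussed in this
        % entry scales the noise added to a released statistic. Assumes NumPy and a simple
        % counting query with sensitivity 1; remove the leading "% " to run the Python code.
        %
        %   import numpy as np
        %
        %   def noisy_count(true_count, epsilon, sensitivity=1.0):
        %       """Release a count satisfying epsilon-differential privacy."""
        %       scale = sensitivity / epsilon              # smaller epsilon => more noise
        %       return true_count + np.random.laplace(loc=0.0, scale=scale)
        %
        %   print(noisy_count(42, epsilon=0.1))            # strong privacy, very noisy
        %   print(noisy_count(42, epsilon=2.0))            # weaker privacy, less noise
        %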
   1623 @mastersthesis {kevin-thesis,
   1624         title = {Improving Security and Performance in Low Latency Anonymity Networks},
   1625         year = {2011},
   1626         month = may,
   1627         pages = {0--240},
   1628         school = {University of Colorado},
   1629         type = {PhD},
   1630         abstract = {Conventional wisdom dictates that the level of anonymity offered by low latency anonymity networks increases as the user base grows. However, the most significant obstacle to increased adoption of such systems is that their security and performance properties are perceived to be weak. In an effort to help foster adoption, this dissertation aims to better understand and improve security, anonymity, and performance in low latency anonymous communication systems.
   1631 
   1632 To better understand the security and performance properties of a popular low latency anonymity network, we characterize Tor, focusing on its application protocol distribution, geopolitical client and router distributions, and performance. For instance, we observe that peer-to-peer file sharing protocols use an unfair portion of the network's scarce bandwidth. To reduce the congestion produced by bulk downloaders in networks such as Tor, we design, implement, and analyze an anonymizing network tailored specifically for the BitTorrent peer-to-peer file sharing protocol. We next analyze Tor's security and anonymity properties and empirically show that Tor is vulnerable to practical end-to-end traffic correlation attacks launched by relatively weak adversaries that inflate their bandwidth claims to attract traffic and thereby compromise key positions on clients' paths. We also explore the security and performance trade-offs that revolve around path length design decisions and we show that shorter paths offer performance benefits and provide increased resilience to certain attacks. Finally, we discover a source of performance degradation in Tor that results from poor congestion and flow control. To improve Tor's performance and grow its user base, we offer a fresh approach to congestion and flow control inspired by techniques from IP and ATM networks},
   1633         www_section = {low latency anonymous networks, performance, security},
   1634         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kevin-thesis.pdf},
   1635         url = {https://bibliography.gnunet.org},
   1636         author = {Kevin Bauer}
   1637 }
   1638 @conference {wk11-malice-vs-anon,
   1639         title = {Malice versus AN.ON: Possible Risks of Missing Replay and Integrity Protection},
   1640         booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security},
   1641         year = {2011},
   1642         month = feb,
   1643         address = {St. Lucia},
   1644         abstract = {In this paper we investigate the impact of missing replay protection as well as missing integrity protection concerning a local attacker in AN.ON. AN.ON is a low latency anonymity network mostly used to anonymize web traffic. We demonstrate that both protection mechanisms are important by presenting two attacks that become feasible as soon as the mechanisms are missing. We mount both attacks on the AN.ON network which neither implements replay protection nor integrity protection yet},
   1645         www_section = {AN.ON, anonymity network, integrity protection, replay protection},
   1646         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20Malice\%20versus\%20AN.ON_.pdf},
   1647         url = {https://bibliography.gnunet.org},
   1648         author = {Benedikt Westermann and Dogan Kesdogan}
   1649 }
   1650 @article {Tariq:2011:MSQ:2063320.2063330,
   1651         title = {Meeting subscriber-defined QoS constraints in publish/subscribe systems},
   1652         journal = {Concurrency and Computation: Practice and Experience},
   1653         volume = {23},
   1654         number = {17},
   1655         year = {2011},
   1656         pages = {2140--2153},
   1657         publisher = {John Wiley and Sons Ltd},
   1658         address = {Chichester, UK},
   1659         www_section = {content-based, publish/subscribe, QoS},
   1660         issn = {1532-0626},
   1661         doi = {10.1002/cpe.1751},
   1662         url = {http://dx.doi.org/10.1002/cpe.1751},
   1663         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tariq2011Meeting.pdf},
   1664         author = {Tariq, Muhammad Adnan and Boris Koldehofe and Gerald G. Koch and Khan, Imran and Kurt Rothermel}
   1665 }
   1666 @mastersthesis {2011_6,
   1667         title = {Methods for Secure Decentralized Routing in Open Networks},
   1668         volume = {Dr. rer. nat},
   1669         year = {2011},
   1670         month = aug,
   1671         pages = {0--234},
   1672         school = {Technische Universit{\"a}t M{\"u}nchen},
   1673         address = {Garching bei M{\"u}nchen},
   1674         abstract = {  The contribution of this thesis is the study and improvement of secure, decentralized, robust routing algorithms for open networks including ad-hoc networks and peer-to-peer (P2P) overlay networks. The main goals for our secure routing algorithm are openness, efficiency, scalability and resilience to various types of attacks. Common P2P routing algorithms trade off decentralization for security; for instance by choosing whether or not to require a centralized authority to allow peers to join the network. Other algorithms trade scalability for security, for example employing random search or flooding to prevent certain types of attacks.  Our design attempts to meet our security goals in an open system, while limiting the performance penalties incurred.
   1675 
   1676   The first step we took towards designing our routing algorithm was an analysis of the routing algorithm in Freenet.  This algorithm is relevant because it achieves efficient (order O(log n)) routing in realistic network topologies in a fully decentralized open network.  However, we demonstrate why their algorithm is not secure, as malicious participants are able to severely disrupt the operation of the network.  The main difficulty with the Freenet routing algorithm is that for performance it relies on information received from untrusted peers.  We also detail a range of proposed solutions, none of which we found to fully fix the problem.
   1677 
   1678  A related problem for efficient routing in sparsely connected networks is the difficulty in sufficiently populating routing tables.  One way to improve connectivity in P2P overlay networks is by utilizing modern NAT traversal techniques.  We employ a number of standard NAT traversal techniques in our approach, and also developed
   1679 and experimented with a novel method for NAT traversal based on ICMP and UDP hole punching.  Unlike other NAT traversal techniques ours does not require a trusted third party.
   1680 
   1681 Another technique we use in our implementation to help address the connectivity problem in sparse networks is the use of distance vector routing in a small local neighborhood. The distance vector variant used in our system employs onion routing to secure the resulting indirect connections. Materially to this design, we discovered a  serious vulnerability in the Tor protocol which allowed us to use a DoS attack to reduce the anonymity of the users of this extant anonymizing P2P network.  This vulnerability is based on allowing paths of unrestricted length for onion routes through the network. Analyzing Tor and implementing this attack gave us valuable knowledge
   1682 which helped when designing the distance vector routing protocol for our system.
   1683 
   1684   Finally, we present the design of our new secure randomized routing algorithm that does not suffer from the various problems we discovered in previous designs. Goals for the algorithm include providing efficiency and robustness in the presence of malicious participants for an open, fully decentralized network without trusted authorities. We provide a mathematical analysis of the algorithm itself and have created and deployed an implementation of this algorithm in GNUnet. In this thesis we also provide a detailed overview of a distributed
   1685 emulation framework capable of running a large number of nodes using our full code base as well as some of the challenges encountered in creating and using such a testing framework.  We present extensive experimental results showing that our routing algorithm outperforms the dominant DHT design in target topologies, and performs comparably in other scenarios},
   1686         www_section = {distributed hash table, Freenet, GNUnet, NAT, R5N, Tor},
   1687         isbn = {3-937201-26-2},
   1688         issn = {1868-2642},
   1689         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NET-2011-08-1.pdf},
   1690         url = {https://bibliography.gnunet.org},
   1691         author = {Nathan S Evans}
   1692 }
   1693 @article {2011_7,
   1694         title = {Multi-objective optimization based privacy preserving distributed data mining in Peer-to-Peer networks},
   1695         journal = {Peer-to-Peer Networking and Applications},
   1696         volume = {4},
   1697         year = {2011},
   1698         pages = {192--209},
   1699         abstract = {This paper proposes a scalable, local privacy-preserving algorithm for distributed Peer-to-Peer (P2P) data aggregation useful for many advanced data mining/analysis tasks such as average/sum computation, decision tree induction, feature selection, and more. Unlike most multi-party privacy-preserving data mining algorithms, this approach works in an asynchronous manner through local interactions and it is highly scalable. It particularly deals with the distributed computation of the sum of a set of numbers stored at different peers in a P2P network in the context of a P2P web mining application. The proposed optimization-based privacy-preserving technique for computing the sum allows different peers to specify different privacy requirements without having to adhere to a global set of parameters for the chosen privacy model. Since distributed sum computation is a frequently used primitive, the proposed approach is likely to have significant impact on many data mining tasks such as multi-party privacy-preserving clustering, frequent itemset mining, and statistical aggregate computation},
   1700         www_section = {Data mining, peer-to-peer, Privacy preserving},
   1701         issn = {1936-6442},
   1702         doi = {10.1007/s12083-010-0075-1},
   1703         url = {http://dx.doi.org/10.1007/s12083-010-0075-1},
   1704         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Privacy_PPNA2011Das.pdf},
   1705         author = {Das, Kamalika and Bhaduri, Kanishka and Kargupta, Hillol}
   1706 }
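        %
        % Editor's note (toy sketch, not the paper's optimization-based protocol): a classic
        % ring-based secure-sum primitive of the kind the abstract above refers to -- every
        % peer only ever sees a randomly masked running total, so the final sum is revealed
        % without exposing any individual input. Names and the modulus are assumptions for
        % illustration; remove the leading "% " to run the Python code.
        %
        %   import random
        %
        %   MOD = 2**32                                    # inputs are summed modulo a large constant
        %
        %   def secure_sum(private_values):
        %       masks = [random.randrange(MOD) for _ in private_values]
        %       total = 0
        %       for value, mask in zip(private_values, masks):
        %           total = (total + value + mask) % MOD   # each peer adds its masked value
        %       for mask in masks:
        %           total = (total - mask) % MOD           # each peer then strips its own mask
        %       return total
        %
        %   print(secure_sum([5, 17, 3]))                  # prints 25
        %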
   1707 @conference {LEBLOND:2011:INRIA-00574178:1,
   1708         title = {One Bad Apple Spoils the Bunch: Exploiting P2P Applications to Trace and Profile Tor Users},
   1709         booktitle = {4th USENIX Workshop on Large-Scale Exploits and Emergent Threats (LEET '11)},
   1710         year = {2011},
   1711         month = mar,
   1712         publisher = {USENIX},
   1713         organization = {USENIX},
   1714         address = {Boston, United States},
   1715         abstract = {Tor is a popular low-latency anonymity network. However, Tor does not protect against the exploitation of an insecure application to reveal the IP address of, or trace, a TCP stream. In addition, because of the linkability of Tor streams sent together over a single circuit, tracing one stream sent over a circuit traces them all. Surprisingly, it is unknown whether this linkability allows in practice to trace a significant number of streams originating from secure (i.e., proxied) applications. In this paper, we show that linkability allows us to trace 193\% of additional streams, including 27\% of HTTP streams possibly originating from {\textquoteleft}{\textquoteleft}secure'' browsers. In particular, we traced 9\% of Tor streams carried by our instrumented exit nodes. Using BitTorrent as the insecure application, we design two attacks tracing BitTorrent users on Tor. We run these attacks in the wild for 23 days and reveal 10,000 IP addresses of Tor users. Using these IP addresses, we then profile not only the BitTorrent downloads but also the websites visited per country of origin of Tor users. We show that BitTorrent users on Tor are over-represented in some countries as compared to BitTorrent users outside of Tor. By analyzing the type of content downloaded, we then explain the observed behaviors by the higher concentration of pornographic content downloaded at the scale of a country. Finally, we present results suggesting the existence of an underground BitTorrent ecosystem on Tor},
   1716         www_section = {anonymity, Tor},
   1717         url = {http://hal.inria.fr/inria-00574178/PDF/btor.pdf},
   1718         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/btor.pdf},
   1719         author = {Le Blond, Stevens and Manils, Pere and Abdelberi, Chaabane and Kaafar, Mohamed Ali and Claude Castelluccia and Legout, Arnaud and Dabbous, Walid}
   1720 }
   1721 @article {perea-tissec11,
   1722         title = {PEREA: Practical TTP-free revocation of repeatedly misbehaving anonymous users},
   1723         journal = {ACM Transactions on Information and System Security ({ACM TISSEC})},
   1724         volume = {14},
   1725         year = {2011},
   1726         month = dec,
   1727         pages = {29:1--29:34},
   1728         publisher = {ACM},
   1729         address = {New York, NY, USA},
   1730         abstract = {Several anonymous authentication schemes allow servers to revoke a misbehaving user's future accesses.
   1731 Traditionally, these schemes have relied on powerful Trusted Third Parties (TTPs) capable of deanonymizing
   1732 (or linking) users' connections. Such TTPs are undesirable because users' anonymity is not guaranteed, and users must trust them to judge {\textquoteleft}misbehavior' fairly. Recent schemes such as Blacklistable Anonymous Credentials (BLAC) and Enhanced Privacy ID (EPID) support {\textquotedblleft}privacy-enhanced revocation{\textquotedblright} {\textemdash} servers can revoke misbehaving users without a TTP's involvement, and without learning the revoked users' identities.
   1733 In BLAC and EPID, however, the computation required for authentication at the server is linear in the size (L) of the revocation list, which is impractical as the size approaches thousands of entries. We propose PEREA, a new anonymous authentication scheme for which this bottleneck of computation is independent of the size of the revocation list. Instead, the time complexity of authentication is linear in the size of a revocation window K $\ll$ L, the number of subsequent authentications before which a user's misbehavior must be recognized if the user is to be revoked. We extend PEREA to support more complex revocation policies that take the severity of misbehaviors into account. Users can authenticate anonymously if their naughtiness, i.e., the sum of the severities of their blacklisted misbehaviors, is below a certain naughtiness threshold.
   1734 We call our extension PEREA-Naughtiness. We prove the security of our constructions, and validate their efficiency as compared to BLAC both analytically and quantitatively},
   1735         www_section = {anonymous authentication, anonymous blacklisting, privacy, privacy-enhanced revocation, user misbehavior},
   1736         issn = {1094-9224},
   1737         doi = {http://doi.acm.org/10.1145/2043628.2043630},
   1738         url = {http://doi.acm.org/10.1145/2043628.2043630},
   1739         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TISSEC\%20-\%20PEREA.pdf},
   1740         author = {Man Ho Au and Patrick P. Tsang and Apu Kapadia}
   1741 }
   1742 @article {gauger2011lj,
   1743         title = {Performance Regression Monitoring with Gauger},
   1744         journal = {Linux Journal},
   1745         number = {209},
   1746         year = {2011},
   1747         month = sep,
   1748         chapter = {68},
   1749         www_section = {Gauger, GNUnet},
   1750         url = {http://www.linuxjournaldigital.com/linuxjournal/201109$\#$pg68},
   1751         author = {Polot, Bartlomiej and Christian Grothoff}
   1752 }
   1753 @conference {usenix11-pirtor,
   1754         title = {PIR-Tor: Scalable Anonymous Communication Using Private Information Retrieval},
   1755         booktitle = {Proceedings of the 20th USENIX Security Symposium},
   1756         year = {2011},
   1757         month = aug,
   1758         address = {San Francisco, CA, USA},
   1759         abstract = {Existing anonymous communication systems like Tor do not scale well as they require all users to maintain up-to-date information about all available Tor relays in the system. Current proposals for scaling anonymous communication advocate a peer-to-peer (P2P) approach. While the P2P paradigm scales to millions of nodes, it provides new opportunities to compromise anonymity. In this paper, we step away from the P2P paradigm and advocate a client-server approach to scalable anonymity. We propose PIR-Tor, an architecture for the Tor network in which users obtain information about only a few onion routers using private information retrieval techniques. Obtaining information about only a few onion routers is the key to the scalability of our approach, while the use of private information retrieval techniques helps preserve client anonymity. The security of our architecture depends on the security of PIR schemes which are
   1760 well understood and relatively easy to analyze, as opposed to peer-to-peer designs that require analyzing extremely complex and dynamic systems. In particular, we demonstrate that reasonable parameters of our architecture provide equivalent security to that of the Tor network. Moreover, our experimental results show that the overhead of PIR-Tor is manageable even when the Tor network scales by two orders of magnitude},
   1761         www_section = {anonymous communication, peer to peer, PIR-Tor},
   1762         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/USENIX\%20-\%20PIR-Tor.pdf},
   1763         url = {https://bibliography.gnunet.org},
   1764         author = {Prateek Mittal and Femi Olumofin and Carmela Troncoso and Borisov, Nikita and Ian Goldberg}
   1765 }
   1766 @conference {DK11,
   1767         title = {Practical Privacy-Preserving Multiparty Linear Programming Based on Problem Transformation},
   1768         booktitle = {PASSAT'11--Proceedings of the Third IEEE International Conference on Information Privacy, Security, Risk and Trust},
   1769         year = {2011},
   1770         month = oct,
   1771         pages = {916--924},
   1772         publisher = {IEEE Computer Society},
   1773         organization = {IEEE Computer Society},
   1774         address = {Boston, Massachusetts, USA},
   1775         abstract = {Cryptographic solutions to privacy-preserving multiparty linear programming are slow. This makes them unsuitable for many economically important applications, such as supply chain optimization, whose size exceeds their practically feasible input range. In this paper we present a privacy-preserving transformation that allows secure outsourcing of the linear program computation in an efficient manner. We evaluate security by quantifying the leakage about the input after the transformation and present implementation results. Using this transformation, we can mostly replace the costly cryptographic operations and securely solve problems several orders of magnitude larger},
   1776         www_section = {cryptography, SMC},
   1777         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PASSAT\%2711\%20-\%20Multiparty\%20linear\%20programming.pdf},
   1778         url = {https://bibliography.gnunet.org},
   1779         author = {Dreier, Jannik and Florian Kerschbaum}
   1780 }
   1781 @mastersthesis {2011_8,
   1782         title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A Real-World Case Study using I2P},
   1783         volume = {M.S},
   1784         year = {2011},
   1785         month = mar,
   1786         pages = {0--59},
   1787         school = {Technische Universit{\"a}t M{\"u}nchen},
   1788         type = {M.S},
   1789         address = {Garching bei M{\"u}nchen},
   1790         abstract = {The Invisible Internet Project (I2P) is one of the most widely
   1791 used anonymizing Peer-to-Peer networks on the Internet today.  Like
   1792 Tor, it uses onion routing to build tunnels between peers as the basis
   1793 for providing anonymous communication channels.  Unlike Tor, I2P
   1794 integrates a range of anonymously hosted services directly with the
   1795 platform.  This thesis presents a new attack on the I2P Peer-to-Peer
   1796 network, with the goal of determining the identity of peers that are
   1797 anonymously hosting HTTP (Eepsite) services in the network.
   1798 
   1799 Key design choices made by I2P developers, in particular
   1800 performance-based peer selection, enable a sophisticated adversary
   1801 with modest resources to break key security assumptions.  Our attack
   1802 first obtains an estimate of the victim's view of the network.  Then,
   1803 the adversary selectively targets a small number of peers used by the
   1804 victim with a denial-of-service attack while giving the victim the
   1805 opportunity to replace those peers with other peers that are
   1806 controlled by the adversary.  Finally, the adversary performs some
   1807 simple measurements to determine the identity of the peer hosting the
   1808 service.
   1809 
   1810 This thesis provides the necessary background on I2P, gives details on
   1811 the attack --- including experimental data from measurements against the
   1812 actual I2P network --- and discusses possible solutions},
   1813         www_section = {anonymity, attack, denial-of-service, I2P},
   1814         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herrmann2011mt.pdf},
   1815         url = {https://bibliography.gnunet.org},
   1816         author = {Michael Herrmann}
   1817 }
   1818 @conference { herrmann2010pet,
   1819         title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A Real-World Case Study using I2P},
   1820         booktitle = {Privacy Enhancing Technologies Symposium (PETS 2011)},
   1821         year = {2011},
   1822         month = apr,
   1823         publisher = {Springer Verlag},
   1824         organization = {Springer Verlag},
   1825         address = {Waterloo, Canada},
   1826         abstract = {I2P is one of the most widely used anonymizing Peer-to-Peer networks on the Internet today.  Like Tor, it uses onion routing to build tunnels between peers as the basis for providing anonymous communication channels.  Unlike Tor, I2P integrates a range of anonymously hosted services directly with the platform.  This paper presents a new attack on the I2P Peer-to-Peer network, with the goal
   1827 of determining the identity of peers that are anonymously hosting HTTP services (Eepsite) in the network.
   1828 
   1829 Key design choices made by I2P developers, in particular
   1830 performance-based peer selection, enable a sophisticated adversary with modest resources to break key security assumptions.  Our attack first obtains an estimate of the victim's view of the network.  Then, the adversary selectively targets a small number of peers used by the
   1831 victim with a denial-of-service attack while giving the victim the opportunity to replace those peers with other peers that are controlled by the adversary.  Finally, the adversary performs some simple measurements to determine the identity of the peer hosting the service.
   1832 
   1833 This paper provides the necessary background on I2P, gives details on the attack --- including experimental data from measurements against the actual I2P network --- and discusses possible solutions},
   1834         www_section = {anonymity, attack, Guard, I2P, onion routing},
   1835         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet2011i2p.pdf},
   1836         url = {https://bibliography.gnunet.org},
   1837         author = {Michael Herrmann and Christian Grothoff}
   1838 }
   1839 @book {2011_9,
   1840         title = {Private Similarity Computation in Distributed Systems: From Cryptography to Differential Privacy},
   1841         booktitle = {Principles of Distributed Systems},
   1842         series = {Lecture Notes in Computer Science},
   1843         volume = {7109},
   1844         year = {2011},
   1845         pages = {357--377},
   1846         publisher = {Springer Berlin Heidelberg},
   1847         organization = {Springer Berlin Heidelberg},
   1848         abstract = {In this paper, we address the problem of computing the similarity between two users (according
   1849 to their profiles) while preserving their privacy in a fully decentralized system and for the passive adversary
   1850 model. First, we introduce a two-party protocol for privately computing a threshold version of the similarity and apply it to well-known similarity measures such as the scalar product and the cosine similarity. The output of this protocol is only one bit of information telling whether or not two users are similar beyond a predetermined threshold. Afterwards, we explore the computation of the exact and threshold similarity within the context of differential privacy. Differential privacy is a recent notion developed within the field of private data analysis guaranteeing that an adversary that observes the output of the differentially
   1851 private mechanism, will only gain a negligible advantage (up to a privacy parameter) from the presence (or absence) of a particular item in the profile of a user. This provides a strong privacy guarantee that holds independently of the auxiliary knowledge that the adversary might have. More specifically, we design several differentially private variants of the exact and threshold protocols that rely on the addition of random noise tailored to the sensitivity of the considered similarity measure. We also analyze their complexity as well as their impact on the utility of the resulting similarity measure. Finally, we provide experimental results validating the effectiveness of the proposed approach on real datasets},
   1852         www_section = {Differential Privacy, homomorphic encryption, privacy, similarity measure},
   1853         isbn = {978-3-642-25872-5},
   1854         doi = {10.1007/978-3-642-25873-2_25},
   1855         url = {http://dx.doi.org/10.1007/978-3-642-25873-2_25},
   1856         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateSimilarity2011Alaggan.pdf},
   1857         author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie},
   1858         editor = {Fern{\`a}ndez Anta, Antonio and Lipari, Giuseppe and Roy, Matthieu}
   1859 }
   1860 @conference {proximax11,
   1861         title = {Proximax: Fighting Censorship With an Adaptive System for Distribution of Open Proxies},
   1862         booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security},
   1863         year = {2011},
   1864         month = feb,
   1865         address = {St. Lucia},
   1866         abstract = {Many people currently use proxies to circumvent government censorship that blocks access to content on the Internet. Unfortunately, the dissemination channels used to distribute proxy server locations are increasingly being monitored to discover and quickly block these proxies. This has given rise to a large number of ad hoc dissemination channels that leverage trust networks to reach legitimate users and at the same time prevent proxy server addresses from falling into the hands of censors. To address this problem in a more principled manner, we present Proximax, a robust system that continuously distributes pools of proxies to a large number of channels. The key research challenge in Proximax is to distribute the proxies among the different channels in a way that maximizes the usage of these proxies while minimizing the risk of having them blocked. This is challenging because of two conflicting goals: widely disseminating the location of the proxies to fully utilize their capacity and preventing (or at least delaying) their discovery by censors. We present a practical system that lays out a design and analytical model that balances these factors},
   1867         www_section = {government censorship, Proximax, proxy},
   1868         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20Proximax.pdf},
   1869         url = {https://bibliography.gnunet.org},
   1870         author = {Kirill Levchenko and Damon McCoy}
   1871 }
   1872 @conference {DBLP:conf/dbsec/Kerschbaum11,
   1873         title = {Public-Key Encrypted Bloom Filters with Applications to Supply Chain Integrity},
   1874         booktitle = {Public-Key Encrypted Bloom Filters with Applications to Supply Chain Integrity},
   1875         year = {2011},
   1876         pages = {60--75},
   1877         www_section = unsorted,
   1878         url = {https://bibliography.gnunet.org},
   1879         author = {Florian Kerschbaum}
   1880 }
   1881 @conference {R5N,
   1882         title = {R5N: Randomized Recursive Routing for Restricted-Route Networks},
   1883         booktitle = {5th International Conference on Network and System Security (NSS 2011)},
   1884         year = {2011},
   1885         month = sep,
   1886         publisher = {IEEE},
   1887         organization = {IEEE},
   1888         address = {Milan, Italy},
   1889         abstract = {This paper describes a new secure DHT routing algorithm for open, decentralized P2P networks operating in a restricted-route environment with malicious participants.  We have implemented our routing algorithm and have evaluated its performance under various topologies and in the presence of malicious peers.  For small-world topologies, our algorithm provides significantly better performance when compared to existing methods. In more densely connected topologies, our performance is better than or on par with other designs},
   1890         www_section = {distributed hash table, GNUnet, R5N, routing},
   1891         www_tags = selected,
   1892         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nss2011.pdf},
   1893         url = {https://bibliography.gnunet.org},
   1894         author = {Nathan S Evans and Christian Grothoff}
   1895 }
   1896 @conference {2011_11,
   1897         title = {On the Relation Between Differential Privacy and Quantitative Information Flow},
   1898         booktitle = {Proceedings of the 38th International Conference on Automata, Languages and Programming--Volume Part II},
   1899         year = {2011},
   1900         publisher = {Springer-Verlag},
   1901         organization = {Springer-Verlag},
   1902         address = {Berlin, Heidelberg},
   1903         abstract = {Differential privacy is a notion that has emerged in the community of statistical databases, as a response to the problem of protecting the privacy of the database's participants when performing statistical queries. The idea is that a randomized query satisfies differential privacy if the likelihood of obtaining a certain answer for a database x is not too different from the likelihood of obtaining the same answer on adjacent databases, i.e. databases which differ from x for only one individual. Information flow is an area of Security concerned with the problem of controlling the leakage of confidential information in programs and protocols. Nowadays, one of the most established approaches to quantify and to reason about leakage is based on the R{\'e}nyi min entropy version of information theory.
   1904 
   1905 In this paper, we analyze critically the notion of differential privacy in light of the conceptual framework provided by the R{\'e}nyi min information theory. We show that there is a close relation between differential
   1906 privacy and leakage, due to the graph symmetries induced by the adjacency relation. Furthermore, we consider the utility of the randomized answer, which measures its expected degree of accuracy. We focus on certain kinds of utility functions called {\textquotedblleft}binary{\textquotedblright}, which have a close correspondence with the R{\'e}nyi min mutual information. Again, it turns out that there can be a tight correspondence between differential privacy and utility, depending on the symmetries induced by the adjacency relation and by the query. Depending on these symmetries we can also build an optimal-utility randomization mechanism while preserving the required level of differential privacy. Our main contribution is a study of the kind of structures that can be induced by the adjacency relation and the query, and how to use them to derive bounds on the leakage and achieve the optimal utility},
   1907         isbn = {978-3-642-22011-1},
   1908         url = {http://dl.acm.org/citation.cfm?id=2027223.2027228},
   1909         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2011Alvim.pdf},
   1910         www_section = unsorted,
   1911         author = {Alvim, M{\'a}rio S. and Andr{\'e}s, Miguel E.}
   1912 }
   1913 @article {cryptoeprint:2011:232,
   1914         title = {Remote Timing Attacks are Still Practical},
   1915         year = {2011},
   1916         note = {\url{http://eprint.iacr.org/}},
   1917         month = apr,
   1918         institution = {Cryptology ePrint Archive},
   1919         abstract = {For over two decades, timing attacks have been an active area of research within applied cryptography. These attacks exploit cryptosystem or protocol implementations that do not run in constant time. When implementing an elliptic curve cryptosystem with a goal to provide side-channel resistance, the scalar multiplication routine is a critical component. In such instances, one attractive method often suggested in the literature is Montgomery's ladder that performs a fixed sequence of curve and field operations. This paper describes a timing attack vulnerability in OpenSSL's ladder implementation for curves over binary fields. We use this vulnerability to steal the private key of a TLS server where the server authenticates with ECDSA signatures. Using the timing of the exchanged messages, the messages themselves, and the signatures, we mount a lattice attack that recovers the private key. Finally, we describe and implement an effective countermeasure},
   1920         www_section = {elliptic curve cryptography, lattice attacks, public-key cryptography, side-channel attacks, timing attacks},
   1921         number = {2011/232},
   1923         url = {http://eprint.iacr.org/2011/232},
   1924         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brumley\%20\%26\%20Tuveri\%20-\%20Timing\%20Attacks.pdf},
   1925         author = {Billy Bob Brumley and Nicola Tuveri}
   1926 }
   1927 @conference {2011_12,
   1928         title = {Scalability \& Paranoia in a Decentralized Social Network},
   1929         booktitle = {Federated Social Web},
   1930         year = {2011},
   1931         month = jun,
   1932         address = {Berlin, Germany},
   1933         abstract = {There's a lot of buzz out there about {\textquotedblleft}replacing{\textquotedblright} Facebook with a privacy-enhanced, decentralized, ideally open source something. In this talk we'll focus on how much privacy we should plan for (specifically about how we cannot entrust our privacy to modern virtual machine technology) and the often underestimated problem of getting such a monster network to function properly. These issues can be considered together or separately: Even if you're not as concerned about privacy as we are, the scalability problem still persists},
   1934         www_section = {GNUnet, privacy, social networks},
   1935         url = {https://secushare.org/2011-FSW-Scalability-Paranoia},
   1936         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2011-FSW-Scalability-Paranoia.pdf},
   1937         author = {Carlo v. Loesch and Gabor X Toth and Mathias Baumann}
   1938 }
   1939 @article {Murillo:2011:SCT:1938287.1938323,
   1940         title = {Schedule coordination through egalitarian recurrent multi-unit combinatorial auctions},
   1941         journal = {Applied Intelligence},
   1942         volume = {34},
   1943         number = {1},
   1944         year = {2011},
   1945         month = apr,
   1946         pages = {47--63},
   1947         publisher = {Kluwer Academic Publishers},
   1948         address = {Hingham, MA, USA},
   1949         abstract = {When selfish industries are competing for limited shared resources, they need to coordinate their activities to handle possible conflicting situations. Moreover, this coordination should not affect the activities already planned by the industries, since this could have negative effects on their performance. Although agents may have buffers that allow them to delay the use of resources, these are of a finite capacity, and therefore cannot be used indiscriminately. Thus, we are faced with the problem of coordinating schedules that have already been generated by the agents. To address this task, we propose to use a recurrent auction mechanism to mediate between the agents. Through this auction mechanism, the agents can express their interest in using the resources, thus helping the scheduler to find the best distribution. We also introduce a priority mechanism to add fairness to the coordination process. The proposed coordination mechanism has been applied to a waste water treatment system scenario, where different industries need to discharge their waste. We have simulated the behavior of the system, and the results show that using our coordination mechanism the waste water treatment plant can successfully treat most of the discharges, while the production activity of the industries is almost not affected by it},
   1950         www_section = {auction mechanisms, auctions, economy, egalitarism, schedule coordination},
   1951         issn = {0924-669X},
   1952         doi = {10.1007/s10489-009-0178-7},
   1953         url = {http://dx.doi.org/10.1007/s10489-009-0178-7},
   1954         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Applied\%20Intelligence\%20-\%20Combinatorial\%20Auctions.pdf},
   1955         author = {Murillo, Javier and Mu{\~n}oz, V{\'\i}ctor and Busquets, D{\'\i}dac and L{\'o}pez, Beatriz}
   1956 }
   1957 @article {2011_13,
   1958         title = {Secure collaborative supply chain planning and inverse optimization--The JELS model},
   1959         journal = {European Journal of Operational Research},
   1960         volume = {208},
   1961         year = {2011},
   1962         month = jan,
   1963         pages = {75--85},
   1964         abstract = {It is a well-acknowledged fact that collaboration between different members of a supply chain yields a significant potential to increase overall supply chain performance. Sharing private information has been identified as prerequisite for collaboration and, at the same time, as one of its major obstacles. One potential avenue for overcoming this obstacle is Secure Multi-Party Computation (SMC). SMC is a cryptographic technique that enables the computation of any (well-defined) mathematical function by a number of parties without any party having to disclose its input to another party. In this paper, we show how SMC can be successfully employed to enable joint decision-making and benefit sharing in a simple supply chain setting. We develop secure protocols for implementing the well-known {\textquotedblleft}Joint Economic Lot Size (JELS) Model{\textquotedblright} with benefit sharing in such a way that none of the parties involved has to disclose any private (cost and capacity) data. Thereupon, we show that although computation of the model's outputs can be performed securely, the approach still faces practical limitations. These limitations are caused by the potential of {\textquotedblleft}inverse optimization{\textquotedblright}, i.e., a party can infer another party's private data from the output of a collaborative planning scheme even if the computation is performed in a secure fashion. We provide a detailed analysis of {\textquotedblleft}inverse optimization{\textquotedblright} potentials and introduce the notion of {\textquotedblleft}stochastic security{\textquotedblright}, a novel approach to assess the additional information a party may learn from joint computation and benefit sharing. Based on our definition of {\textquotedblleft}stochastic security{\textquotedblright} we propose a stochastic benefit sharing rule, develop a secure protocol for this benefit sharing rule, and assess under which conditions stochastic benefit sharing can guarantee secure collaboration},
   1965         www_section = {collaboration, information sharing, secure multi-party computation, SMC, supply chain management},
   1966         doi = {http://dx.doi.org/10.1016/j.ejor.2010.08.018},
   1967         url = {http://www.sciencedirect.com/science/article/pii/S0377221710005552},
   1968         author = {Richard Pibernik and Yingying Zhang and Florian Kerschbaum and Axel Schr{\"o}pfer}
   1969 }
   1970 @conference {conf/ndss/BackesMP11,
   1971         title = {A Security API for Distributed Social Networks},
   1972         booktitle = {NDSS'11--Proceedings of the Network and Distributed Security Symposium},
   1973         year = {2011},
   1974         month = feb,
   1975         publisher = {The Internet Society},
   1976         organization = {The Internet Society},
   1977         address = {San Diego, CA, USA},
   1978         abstract = {We present a cryptographic framework to achieve access control, privacy of social relations, secrecy of resources, and anonymity of users in social networks. We illustrate our technique on a core API for social networking, which
   1979 includes methods for establishing social relations and for sharing resources. The cryptographic protocols implementing these methods use pseudonyms to hide user identities, signatures on these pseudonyms to establish social relations, and zero-knowledge proofs of knowledge of such signatures to demonstrate the existence of social relations without sacrificing user anonymity. As we do not put any constraints on the underlying social network, our framework is generally applicable and, in particular, constitutes an ideal plug-in for decentralized social networks.
   1980 We analyzed the security of our protocols by developing formal definitions of the aforementioned security properties and by verifying them using ProVerif, an automated theorem prover for cryptographic protocols. Finally, we built a prototypical implementation and conducted an experimental evaluation to demonstrate the efficiency and the scalability of our framework},
   1981         www_section = {API, online-social-networks, security},
   1982         url = {http://www.lbs.cs.uni-saarland.de/publications/sapi.pdf},
   1983         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NDSS\%2711\%20-\%20Security\%20API\%20for\%20Distributed\%20Social\%20Networks.pdf},
   1984         author = {Michael Backes and Maffei, Matteo and Pecina, Kim}
   1985 }
   1986 @conference {DBLP:conf/sigecom/GhoshR11,
   1987         title = {Selling Privacy at Auction},
   1988         booktitle = {Selling Privacy at Auction},
   1989         year = {2011},
   1990         pages = {199--208},
   1991         url = {https://bibliography.gnunet.org},
   1992         www_section = unsorted,
   1993         author = {Arpita Ghosh and Aaron Roth}
   1994 }
   1995 @book {2011_14,
   1996         title = {Social Market: Combining Explicit and Implicit Social Networks},
   1997         booktitle = {Stabilization, Safety, and Security of Distributed Systems},
   1998         series = {Lecture Notes in Computer Science},
   1999         volume = {6976},
   2000         year = {2011},
   2001         pages = {193--207},
   2002         publisher = {Springer Berlin Heidelberg},
   2003         organization = {Springer Berlin Heidelberg},
   2004         abstract = {The pervasiveness of the Internet has led research and applications to focus more and more on their users. Online social networks such as Facebook provide users with the ability to maintain an unprecedented number of social connections. Recommendation systems exploit the opinions of other users to suggest movies or products based on our similarity with them. This shift from machines to users motivates the emergence of novel applications and research challenges.
   2005 In this paper, we embrace the social aspects of the Web 2.0 by considering a novel problem. We build a distributed social market that combines interest-based social networks with explicit networks like Facebook. Our Social Market (SM) allows users to identify and build connections to other users that can provide interesting goods, or information. At the same time, it backs up these connections with trust, by associating them with paths of trusted users that connect new acquaintances through the explicit network. This convergence of implicit and explicit networks yields TAPS, a novel gossip protocol that can be applied in applications devoted to commercial transactions, or to add robustness to standard gossip applications like dissemination or recommendation systems},
   2006         isbn = {978-3-642-24549-7},
   2007         doi = {10.1007/978-3-642-24550-3_16},
   2008         url = {http://dx.doi.org/10.1007/978-3-642-24550-3_16},
   2009         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SocialMarket2011Frey.pdf},
   2010         www_section = unsorted,
   2011         author = {Frey, Davide and J{\'e}gou, Arnaud and Kermarrec, Anne-Marie},
   2012         editor = {D{\'e}fago, Xavier and Petit, Franck and Villain, Vincent}
   2013 }
   2014 @conference {ccs2011-stealthy,
   2015         title = {Stealthy Traffic Analysis of Low-Latency Anonymous Communication Using Throughput Fingerprinting},
   2016         booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and Communications Security},
   2017         year = {2011},
   2018         month = oct,
   2019         publisher = {ACM},
   2020         organization = {ACM},
   2021         address = {Chicago, IL, United States},
   2022         abstract = {Anonymity systems such as Tor aim to enable users to communicate in a manner that is untraceable by adversaries that control a small number of machines. To provide efficient service to users, these anonymity systems make full use of forwarding capacity when sending traffic between intermediate relays. In this paper, we show that doing this leaks information about the set of Tor relays in a circuit (path). We present attacks that, with high confidence and based solely on throughput information, can (a) reduce the attacker's uncertainty about the bottleneck relay of any Tor circuit whose throughput can be observed, (b) exactly identify the guard relay(s) of a Tor user when circuit throughput can be observed over multiple connections, and (c) identify whether two concurrent TCP connections belong to the same Tor user, breaking unlinkability. Our attacks are stealthy, and cannot be readily detected by a user or by Tor relays. We validate our attacks using experiments over the live Tor network. We find that the attacker can substantially reduce the entropy of a bottleneck relay distribution of a Tor circuit whose throughput can be observed{\textemdash}the entropy gets
   2023 reduced by a factor of 2 in the median case.
   2024 Such information leaks from a single Tor circuit can be combined over multiple connections to exactly identify a user's guard relay(s). Finally, we are also able to link two connections from the same initiator with a crossover error rate of less
   2025 than 1.5\% in under 5 minutes. Our attacks are also more accurate and require fewer resources than previous attacks on Tor},
   2026         www_section = {anonymity, attacks, throughput},
   2027         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Throughput-fingerprinting.pdf},
   2028         url = {https://bibliography.gnunet.org},
   2029         author = {Prateek Mittal and Ahmed Khurshid and Joshua Juen and Matthew Caesar and Borisov, Nikita}
   2030 }
   2031 @conference {2011_15,
   2032         title = {SWIRL: A Scalable Watermark to Detect Correlated Network Flows},
   2033         booktitle = {NDSS'11--Proceedings of the Network and Distributed System Security Symposium},
   2034         year = {2011},
   2035         month = feb,
   2036         address = {San Diego, CA, USA},
   2037         abstract = {Flow watermarks are active traffic analysis techniques that help establish a causal connection between two network flows by content-independent manipulations, e.g., altering packet timings. Watermarks provide a much more
   2038 scalable approach for flow correlation than passive traffic analysis. Previous designs of scalable watermarks, however, were subject to multi-flow attacks. They also introduced delays too large to be used in most environments. We design SWIRL, a Scalable Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first watermark that is practical to use for large-scale traffic analysis. SWIRL uses a flow-dependent approach to resist multi-flow
   2039 attacks, marking each flow with a different pattern. SWIRL is robust to packet losses and network jitter, yet it introduces only small delays that are invisible to both benign users and determined adversaries. We analyze the performance of SWIRL both analytically and on the PlanetLab testbed, demonstrating very low error rates. We consider applications of SWIRL to stepping stone detection and linking anonymous communication. We also propose a novel application of watermarks to defend against congestion attacks on Tor},
   2040         www_section = {anonymity, SWIRL, traffic analysis, watermarking},
   2041         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NDSS11-2.pdf},
   2042         url = {https://bibliography.gnunet.org},
   2043         author = {Amir Houmansadr and Borisov, Nikita}
   2044 }
   2045 @conference {usenix11-telex,
   2046         title = {Telex: Anticensorship in the Network Infrastructure},
   2047         booktitle = {Proceedings of the 20th USENIX Security Symposium},
   2048         year = {2011},
   2049         month = aug,
   2050         address = {San Francisco, CA, USA},
   2051         abstract = {In this paper, we present Telex, a new approach to resisting state-level Internet censorship. Rather than attempting to win the cat-and-mouse game of finding open proxies, we leverage censors' unwillingness to completely block day-to-day Internet access. In effect, Telex converts innocuous, unblocked websites into proxies, without their explicit collaboration. We envision that friendly ISPs would deploy Telex stations on paths between censors' networks and popular, uncensored Internet destinations. Telex stations would monitor seemingly innocuous flows for a special {\textquotedblleft}tag{\textquotedblright} and transparently divert them to a forbidden website or service instead. We propose a new cryptographic scheme based on elliptic curves for tagging TLS handshakes such that the tag is visible to a Telex
   2052 station but not to a censor. In addition, we use our tagging scheme to build a protocol that allows clients to connect to Telex stations while resisting both passive and active attacks. We also present a proof-of-concept implementation that demonstrates the feasibility of our system},
   2053         www_section = {anticensorship, network infrastructure, state-level censorship, proxy, telex},
   2054         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Telex\%3A\%20Anticensorship\%20in\%20the\%20Network\%20Infrastructure.pdf},
   2055         url = {https://bibliography.gnunet.org},
   2056         author = {Eric Wustrow and Scott Wolchok and Ian Goldberg and J. Alex Halderman}
   2057 }
   2058 @conference {ccs2011-trust,
   2059         title = {Trust-based Anonymous Communication: Adversary Models and Routing Algorithms},
   2060         booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and Communications Security},
   2061         year = {2011},
   2062         month = oct,
   2063         publisher = {ACM},
   2064         organization = {ACM},
   2065         address = {Chicago, IL, United States},
   2066         abstract = {We introduce a novel model of routing security that incorporates the ordinarily overlooked variations in trust that users have for different parts of the network. We focus on anonymous communication, and in particular onion routing, although we expect the approach to apply more broadly. This paper provides two main contributions. First, we present a novel model to consider the various security concerns for route selection in anonymity networks when users vary their trust over parts of the network. Second, to show the usefulness of our model, we present as an example a new algorithm to select paths in onion routing. We analyze its effectiveness against deanonymization and other information leaks, and particularly how it fares in our model versus existing algorithms, which do not consider trust. In contrast to those, we find that our trust-based routing strategy can protect anonymity against an adversary capable of attacking a significant fraction of the network},
   2067         www_section = {anonymous communication, onion routing, privacy, trust},
   2068         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Trust-based\%20Anonymous\%20Communication1.pdf},
   2069         url = {https://bibliography.gnunet.org},
   2070         author = {Aaron Johnson and Paul Syverson and Roger Dingledine and Nick Mathewson}
   2071 }
   2072 @conference {Yang:2011:USN:2068816.2068841,
   2073         title = {Uncovering social network sybils in the wild},
   2074         booktitle = {Proceedings of the 2011 ACM SIGCOMM conference on Internet measurement conference},
   2075         series = {IMC '11},
   2076         year = {2011},
   2077         month = nov,
   2078         pages = {259--268},
   2079         publisher = {ACM},
   2080         organization = {ACM},
   2081         address = {Berlin, Germany},
   2082         abstract = {Sybil accounts are fake identities created to unfairly increase the power or resources of a single user. Researchers have long known about the existence of Sybil accounts in online communities such as file-sharing systems, but have not been able to perform large scale measurements to detect them or measure their activities. In this paper, we describe our efforts to detect, characterize and understand Sybil account activity in the Renren online social network (OSN). We use ground truth provided by Renren Inc. to build measurement based Sybil account detectors, and deploy them on Renren to detect over 100,000 Sybil accounts. We study these Sybil accounts, as well as an additional 560,000 Sybil accounts caught by Renren, and analyze their link creation behavior. Most interestingly, we find that contrary to prior conjecture, Sybil accounts in OSNs do not form tight-knit communities. Instead, they integrate into the social graph just like normal users. Using link creation timestamps, we verify that the large majority of links between Sybil accounts are created accidentally, unbeknownst to the attacker. Overall, only a very small portion of Sybil accounts are connected to other Sybils with social links. Our study shows that existing Sybil defenses are unlikely to succeed in today's OSNs, and we must design new techniques to effectively detect and defend against Sybil attacks},
   2083         www_section = {online social networks, sybil, sybil accounts},
   2084         isbn = {978-1-4503-1013-0},
   2085         doi = {10.1145/2068816.2068841},
   2086         url = {http://doi.acm.org/10.1145/2068816.2068841},
   2087         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2711\%20-\%20Uncovering\%20social\%20network\%20sybils.pdf},
   2088         author = {Yang, Zhi and Wilson, Christo and Wang, Xiao and Gao, Tingting and Ben Y. Zhao and Dai, Yafei}
   2089 }
   2090 @conference {wpes11-panchenko,
   2091         title = {Website Fingerprinting in Onion Routing Based Anonymization Networks},
   2092         booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society},
   2093         year = {2011},
   2094         month = oct,
   2095         publisher = {ACM},
   2096         organization = {ACM},
   2097         address = {Chicago, IL, United States},
   2098         abstract = {Low-latency anonymization networks such as Tor and JAP claim to hide the recipient and the content of communications from a local observer, i.e., an entity that can eavesdrop the traffic between the user and the first anonymization node. Especially users in totalitarian regimes strongly depend on such networks to freely communicate. For these people, anonymity is particularly important and an analysis of the anonymization methods against various attacks is necessary to ensure adequate protection. In this paper we show that anonymity in Tor and JAP is not as strong as expected so far and cannot resist website fingerprinting attacks under certain circumstances. We first define features for website
   2099 fingerprinting solely based on volume, time, and direction of the traffic. As a result, the subsequent classification becomes much easier. We apply support vector machines with the introduced features. We are able to improve recognition
   2100 results of existing works on a given state-of-the-art dataset in Tor from 3\% to 55\% and in JAP from 20\% to 80\%. The datasets assume a closed-world with 775 websites only. In a next step, we transfer our findings to a more complex and realistic open-world scenario, i.e., recognition of several websites in a set of thousands of random unknown websites. To the best of our knowledge, this work is the first successful attack in the open-world scenario. We achieve a surprisingly high true positive rate of up to 73\% for a false positive rate of 0.05\%. Finally, we show preliminary results of a proof-of-concept implementation that applies camouflage as a countermeasure to hamper the fingerprinting attack. For
   2101 JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from 55\% to about 3\%},
   2102         www_section = {anonymous communication, pattern recognition, privacy, traffic analysis, website fingerprinting},
   2103         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20Fingerprinting.pdf},
   2104         url = {https://bibliography.gnunet.org},
   2105         author = {Andriy Panchenko and Lukas Niessen and Andreas Zinnen and Thomas Engel}
   2106 }
   2107 @conference {Eppstein:2011:WDE:2018436.2018462,
   2108         title = {What's the difference?: efficient set reconciliation without prior context},
   2109         booktitle = {Proceedings of the ACM SIGCOMM 2011 conference},
   2110         series = {SIGCOMM '11},
   2111         year = {2011},
   2112         pages = {218--229},
   2113         publisher = {ACM},
   2114         organization = {ACM},
   2115         address = {New York, NY, USA},
   2116         www_section = {difference digest, GNUnet, invertible bloom filter, set difference},
   2117         isbn = {978-1-4503-0797-0},
   2118         doi = {10.1145/2018436.2018462},
   2119         url = {http://doi.acm.org/10.1145/2018436.2018462},
   2120         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EppGooUye-SIGCOMM-11.pdf},
   2121         author = {Eppstein, David and Goodrich, Michael T. and Uyeda, Frank and Varghese, George}
   2122 }
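%
% The keywords of the entry above (difference digest, invertible bloom filter) refer to the
% paper's core construction: each side summarises its set in an invertible Bloom filter (IBF),
% the two summaries are subtracted cell-wise, and "pure" cells are peeled off to recover the
% set difference without prior context. The following is only a rough illustrative Python
% sketch of that peeling idea; the cell count, hash functions and checksum scheme are
% arbitrary choices for this example, not the parameters or encoding used in the paper.
%
% import hashlib
%
% CELLS = 80   # number of cells; must be sized to the expected difference
% K = 3        # hash functions per element
%
% def _cells(x: int):
%     # K pseudo-random cell indices for element x
%     return [int.from_bytes(hashlib.sha256(b"%d:%d" % (i, x)).digest()[:4], "big") % CELLS
%             for i in range(K)]
%
% def _check(x: int) -> int:
%     # per-element checksum used to recognise "pure" cells while decoding
%     return int.from_bytes(hashlib.sha256(b"c:%d" % x).digest()[:4], "big")
%
% class IBF:
%     def __init__(self):
%         self.count = [0] * CELLS
%         self.keysum = [0] * CELLS
%         self.checksum = [0] * CELLS
%
%     def insert(self, x: int, sign: int = 1) -> None:
%         for i in _cells(x):
%             self.count[i] += sign
%             self.keysum[i] ^= x
%             self.checksum[i] ^= _check(x)
%
%     def subtract(self, other):
%         # cell-wise difference of two summaries built over sets A and B
%         d = IBF()
%         d.count = [a - b for a, b in zip(self.count, other.count)]
%         d.keysum = [a ^ b for a, b in zip(self.keysum, other.keysum)]
%         d.checksum = [a ^ b for a, b in zip(self.checksum, other.checksum)]
%         return d
%
%     def decode(self):
%         # repeatedly peel cells holding exactly one element until nothing changes
%         only_a, only_b = set(), set()
%         progress = True
%         while progress:
%             progress = False
%             for i in range(CELLS):
%                 if self.count[i] in (1, -1) and _check(self.keysum[i]) == self.checksum[i]:
%                     x, sign = self.keysum[i], self.count[i]
%                     (only_a if sign == 1 else only_b).add(x)
%                     self.insert(x, sign=-sign)   # remove x and keep peeling
%                     progress = True
%         return only_a, only_b
%
% a, b = IBF(), IBF()
% for x in {1, 2, 3, 4, 99}: a.insert(x)
% for x in {1, 2, 3, 4, 7}:  b.insert(x)
% print(a.subtract(b).decode())   # expected: ({99}, {7}) when decoding succeeds
%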
   2123 @article {journals/corr/abs-1109-0971,
   2124         title = {X-Vine: Secure and Pseudonymous Routing Using Social Networks},
   2125         journal = {Computing Research Repository},
   2126         volume = {abs/1109.0971},
   2127         year = {2011},
   2128         month = sep,
   2129         abstract = {Distributed hash tables suffer from several security and privacy vulnerabilities, including the problem of Sybil attacks. Existing social network-based solutions to mitigate the Sybil attacks in DHT routing have a high state requirement and do not provide an adequate level of privacy. For instance, such techniques require a user to reveal their social network contacts. We design X-Vine, a protection mechanism for distributed hash tables that operates entirely by communicating over social network links. As with traditional peer-to-peer systems, X-Vine provides robustness, scalability, and a platform for innovation. The use of social network links for communication helps protect participant privacy and adds a new dimension of trust absent from previous designs. X-Vine is resilient to denial of service via Sybil attacks, and in fact is the first Sybil defense that requires only a logarithmic amount of state per node, making it suitable for large-scale and dynamic settings. X-Vine also helps protect the privacy of users' social network contacts and keeps their IP addresses hidden from those outside of their social circle, providing a basis for pseudonymous communication. We first evaluate our design with analysis and simulations, using several real world large-scale social networking topologies. We show that the constraints of X-Vine allow the insertion of only a logarithmic number of Sybil identities per attack edge; we show this mitigates the impact of malicious attacks while not affecting the performance of honest nodes. Moreover, our algorithms are efficient, maintain low stretch, and avoid hot spots in the network. We validate our design with a PlanetLab implementation and a Facebook plugin},
   2130         www_section = {anonymity, cryptography, dblp, distributed hash table, for:isp, routing, security, social-network-routing},
   2131         url = {http://dblp.uni-trier.de/db/journals/corr/corr1109.html$\#$abs-1109-0971},
   2132         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoRR\%20-\%20X-Vine.pdf},
   2133         author = {Prateek Mittal and Matthew Caesar and Borisov, Nikita}
   2134 }
   2135 @conference {2011_16,
   2136         title = {"You Might Also Like:" Privacy Risks of Collaborative Filtering},
   2137         booktitle = {Security and Privacy (SP), 2011 IEEE Symposium on},
   2138         year = {2011},
   2139         month = {May},
   2140         abstract = {Many commercial websites use recommender systems to help customers locate products and content. Modern recommenders are based on collaborative filtering: they use patterns learned from users' behavior to make recommendations, usually in the form of related-items lists. The scale and complexity of these systems, along with the fact that their outputs reveal only relationships between items (as opposed to information about users), may suggest that they pose no meaningful privacy risk.
   2141 In this paper, we develop algorithms which take a moderate amount of auxiliary information about a customer and infer this customer's transactions from temporal changes in the public outputs of a recommender system. Our inference attacks are passive and can be carried out by any Internet user. We evaluate their feasibility using public data from popular websites Hunch, Last.fm, LibraryThing, and Amazon},
   2142         www_section = {accuracy, Amazon, collaboration, collaborative filtering, commercial Web sites, consumer behaviour, Covariance matrix, customer transactions, data privacy, groupware, History, Hunch, Inference algorithms, inference attacks, inference mechanisms, information filtering, Internet, Internet user, Last.fm, Library Thing, privacy, privacy risks, recommender systems, Web sites},
   2143         doi = {10.1109/SP.2011.40},
   2144         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Youmightlike2011Calandrino.pdf},
   2145         url = {https://bibliography.gnunet.org},
   2146         author = {Calandrino, J.A. and Kilzer, A. and Narayanan, A. and Felten, E.W. and Shmatikov, V.}
   2147 }
   2148 @mastersthesis {bartsthesis,
   2149         title = {Adapting Blackhat Approaches to Increase the Resilience of Whitehat Application Scenarios},
   2150         volume = {M.S},
   2151         year = {2010},
   2152         school = {Technische Universit{\"a}t M{\"u}nchen},
   2153         type = {masters},
   2154         address = {M{\"u}nchen},
   2155         www_section = {Botnet, distributed hash table, GNUnet},
   2156         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Polot2010.pdf},
   2157         url = {https://bibliography.gnunet.org},
   2158         author = {Polot, Bartlomiej}
   2159 }
   2160 @conference {2010_0,
   2161         title = {Application of Random Walks to Decentralized Recommender Systems},
   2162         booktitle = {14th International Conference on Principles of Distributed Systems},
   2163         year = {2010},
   2164         month = sep,
   2165         www_section = {random walks, recommender system},
   2166         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/opodis10_HAL.pdf},
   2167         url = {https://bibliography.gnunet.org},
   2168         author = {Anne-Marie Kermarrec and Vincent Leroy and Afshin Moin and Christopher Thraves}
   2169 }
   2170 @article {2010_1,
   2171         title = {The Ariba Framework for Application Development using Service Overlays},
   2172         journal = {Praxis der Informationsverarbeitung und Kommunikation},
   2173         volume = {33},
   2174         year = {2010},
   2175         pages = {7--11},
   2176         abstract = {Developing new network services in the Internet is complex and costly. This high entrance barrier has prevented new innovation in the network itself, and stuck the Internet as being mainly browser-based client/server systems. End-system based decentralized services are cheaper, but have a complexity several orders of magnitude higher than centralized systems in terms of structure and protocols. To foster development of such decentralized network services, we present the ariba framework. We show how ariba can facilitate development of end-system based decentralized services through self-organizing service overlays--flexibly deployed purely on end-systems without the need for costly infrastructure},
   2177         www_section = {overlay networks},
   2178         issn = {1865-8342},
   2179         doi = {10.1515/piko.2010.003},
   2180         url = {http://www.reference-global.com/doi/abs/10.1515/piko.2010.003},
   2181         author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}
   2182 }
   2183 @conference {2010_2,
   2184         title = {Autonomous NAT Traversal},
   2185         booktitle = {10th IEEE International Conference on Peer-to-Peer Computing (IEEE P2P'10)},
   2186         year = {2010},
   2187         publisher = {IEEE},
   2188         organization = {IEEE},
   2189         address = {Delft, The Netherlands},
   2190         abstract = {Traditional NAT traversal methods require the help of a third party for signalling.  This paper investigates a new autonomous
   2191 method for establishing connections to peers behind NAT.  The proposed method for Autonomous NAT traversal uses fake ICMP messages to initially contact the NATed peer.  This paper presents how the method is supposed to work in theory, discusses some possible variations, introduces various concrete implementations of the proposed approach and evaluates empirical results of a measurement study designed to evaluate the efficacy of the idea in practice},
   2192         www_section = {GNUnet, ICMP, NAT, P2P},
   2193         url = {http://grothoff.org/christian/pwnat.pdf},
   2194         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pwnat.pdf},
   2195         author = {Andreas  M{\"u}ller and Nathan S Evans and Christian Grothoff and Samy Kamkar}
   2196 }
   2197 @article {Yeoh:2008:BAB:1402298.1402307,
   2198         title = {BnB-ADOPT: an asynchronous branch-and-bound DCOP algorithm},
   2199         journal = {Journal of Artificial Intelligence Research},
   2200         volume = {38},
   2201         year = {2010},
   2202         pages = {85--133},
   2203         publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
   2204         address = {Richland, SC},
   2205         abstract = {Distributed constraint optimization (DCOP) problems are a popular way of formulating and solving agent-coordination problems. It is often desirable to solve DCOP problems optimally with memory-bounded and asynchronous algorithms. We introduce Branch-and-Bound ADOPT (BnB-ADOPT), a memory-bounded asynchronous DCOP algorithm that uses the message passing and communication framework of ADOPT, a well known memory-bounded asynchronous DCOP algorithm, but changes the search strategy of ADOPT from best-first search to depth-first branch-and-bound search. Our experimental results show that BnB-ADOPT is up to one order of magnitude faster than ADOPT on a variety of large DCOP problems and faster than NCBB, a memory-bounded synchronous DCOP algorithm, on most of these DCOP problems},
   2206         www_section = {agent cooperation, BnB-ADOPT, DCOP, distributed constraint optimization, distributed problem solving},
   2207         issn = {1076-9757},
   2208         doi = {10.1613/jair.2849},
   2209         url = {http://www.jair.org/papers/paper2849.html},
   2210         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20AI\%20-\%20BnB-ADOPT.pdf},
   2211         author = {Yeoh, William and Felner, Ariel and Koenig, Sven}
   2212 }
   2213 @conference {incentives-fc10,
   2214         title = {Building Incentives into Tor},
   2215         booktitle = {Proceedings of Financial Cryptography (FC '10)},
   2216         year = {2010},
   2217         month = jan,
   2218         abstract = {Distributed anonymous communication networks like Tor depend on volunteers to donate their resources. However, the efforts of Tor volunteers have not grown as fast as the demands on the Tor network. We explore techniques to incentivize Tor users to relay Tor traffic too; if users contribute resources to the Tor overlay, they should receive faster service in return. In our design, the central Tor directory authorities measure performance and publish a list of Tor relays that should be given higher priority when establishing circuits. Simulations of our proposed design show that conforming users receive significant improvements in performance, in some cases experiencing twice the network throughput of selfish users who do not relay traffic for the Tor network},
   2219         www_section = {Tor},
   2220         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incentives-fc10.pdf},
   2221         url = {https://bibliography.gnunet.org},
   2222         author = {Tsuen-Wan {\textquoteleft}{\textquoteleft}Johnny'' Ngan and Roger Dingledine and Dan S. Wallach},
   2223         editor = {Radu Sion}
   2224 }
   2225 @conference {1827424,
   2226         title = {Cordies: expressive event correlation in distributed systems},
   2227         booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems},
   2228         year = {2010},
   2229         pages = {26--37},
   2230         publisher = {ACM},
   2231         organization = {ACM},
   2232         address = {New York, NY, USA},
   2233         abstract = {Complex Event Processing (CEP) is the method of choice for the observation of system states and situations by means of events. A number of systems have been introduced that provide CEP in selected environments. Some are restricted to centralised systems, or to systems with synchronous communication, or to a limited space of event relations that are defined in advance. Many modern systems, though, are inherently distributed and asynchronous, and require a more powerful CEP. We present Cordies, a distributed system for the detection of correlated events that is designed for the operation in large-scale, heterogeneous networks and adapts dynamically to changing network conditions. With its expressive language to describe event relations, it is suitable for environments where neither the event space nor the situations of interest are predefined but are constantly adapted. In addition, Cordies supports Quality-of-Service (QoS) for communication in distributed event correlation detection},
   2234         www_section = {QoS},
   2235         isbn = {978-1-60558-927-5},
   2236         doi = {10.1145/1827418.1827424},
   2237         url = {http://portal.acm.org/citation.cfm?id=1827424\&dl=GUIDE\&coll=portal\&CFID=97675623\&CFTOKEN=70931453$\#$},
   2238         author = {Gerald G. Koch and Boris Koldehofe and Kurt Rothermel}
   2239 }
   2240 @booklet {cryptoeprint:2010:264,
   2241         title = {Cryptographic Extraction and Key Derivation: The HKDF Scheme},
   2242         year = {2010},
   2243         note = {\url{http://eprint.iacr.org/}},
   2244         abstract = {In spite of the central role of key derivation functions (KDF) in applied cryptography, there has been little formal work addressing the design and analysis of general multi-purpose KDFs. In practice, most KDFs (including those widely standardized) follow ad-hoc approaches that treat cryptographic hash functions as perfectly random functions. In this paper we close some gaps between theory and practice by contributing to the study and engineering of KDFs in several ways. We provide detailed rationale for the design of KDFs based on the extract-then-expand approach; we present the first general and rigorous definition of KDFs and their security which we base on the notion of computational extractors; we specify a concrete fully practical KDF based on the HMAC construction; and we provide an analysis of this construction based on the extraction and pseudorandom properties of HMAC. The resultant KDF design can support a large variety of KDF applications under suitable assumptions on the underlying hash function; particular attention and effort is devoted to minimizing these assumptions as much as possible for each usage scenario.
   2245 
   2246 Beyond the theoretical interest in modeling KDFs, this work is intended to address two important and timely needs of cryptographic applications: (i) providing a single hash-based KDF design that can be standardized for use in multiple and diverse applications, and (ii) providing a conservative, yet efficient, design that exercises much care in the way it utilizes a cryptographic hash function.
   2247 
   2248 (The HMAC-based scheme presented here, named HKDF, is being standardized by the IETF.)},
   2249         www_section = {GNUnet, HKDF, HMAC, key derivation},
   2250         url = {http://eprint.iacr.org/2010/264},
   2251         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/264.pdf},
   2252         publisher = {unknown},
   2253         author = {Hugo Krawczyk}
   2254 }
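%
% The abstract above describes HKDF's extract-then-expand structure (later standardized as
% RFC 5869). As a reading aid only, here is a minimal Python sketch of that two-step
% construction instantiated with HMAC-SHA256; the salt, info and output length in the usage
% line are made-up example values, not anything prescribed by the paper.
%
% import hashlib, hmac
%
% HASH = hashlib.sha256
% HASH_LEN = HASH().digest_size          # 32 bytes for SHA-256
%
% def hkdf_extract(salt: bytes, ikm: bytes) -> bytes:
%     # "extract": concentrate the input keying material into a fixed-size pseudorandom key
%     return hmac.new(salt or b"\x00" * HASH_LEN, ikm, HASH).digest()
%
% def hkdf_expand(prk: bytes, info: bytes, length: int) -> bytes:
%     # "expand": stretch the PRK into `length` bytes of output keying material
%     okm, t, counter = b"", b"", 1
%     while len(okm) < length:
%         t = hmac.new(prk, t + info + bytes([counter]), HASH).digest()
%         okm += t
%         counter += 1
%     return okm[:length]
%
% # usage example: derive 64 bytes of keying material from a shared secret
% okm = hkdf_expand(hkdf_extract(b"app-salt", b"shared-secret"), b"session v1", 64)
%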
   2255 @mastersthesis {2010_3,
   2256         title = {Developing Peer-to-Peer Web Applications},
   2257         volume = {M.S},
   2258         year = {2010},
   2259         month = sep,
   2260         pages = {0--66},
   2261         school = {University of Helsinki},
   2262         type = {Master's Thesis},
   2263         address = {Helsinki},
   2264         abstract = {As the virtual world grows more complex, finding a standard way for storing data becomes increasingly important. Ideally, each data item would be brought into the computer system only once. References for data items need to be cryptographically verifiable, so the data can maintain its identity while being passed around. This way there will be only one copy of the user's family photo album, while the user can use multiple tools to show or manipulate the album. Copies of the user's data could be stored on some of his family members' computers, some of his computers, but also at some online services which he uses. When all actors operate over one replicated copy of the data, the system automatically avoids a single point of failure. Thus the data will not disappear with one computer breaking, or one service provider going out of business. One shared copy also makes it possible to delete a piece of data from all systems at once, on the user's request. In our research we tried to find a model that would make data manageable to users, and make it possible to have the same data stored at various locations. We studied three systems, Persona, Freenet, and GNUnet, that suggest different models for protecting user data. The main application areas of the systems studied include securing online social networks, providing anonymous web, and preventing censorship in file-sharing. Each of the systems studied stores user data on machines belonging to third parties. The systems differ in measures they take to protect their users from data loss, forged information, censorship, and being monitored. All of the systems use cryptography to secure names used for the content, and to protect the data from outsiders. Based on the gained knowledge, we built a prototype platform called Peerscape, which stores user data in a synchronized, protected database. Data items themselves are protected with cryptography against forgery, but not encrypted as the focus has been disseminating the data directly among family and friends instead of letting third parties store the information. We turned the synchronizing database into peer-to-peer web by revealing its contents through an integrated http server. The REST-like http API supports development of applications in javascript. To evaluate the platform's suitability for application development we wrote some simple applications, including a public chat room, bittorrent site, and a flower growing game. During our early tests we came to the conclusion that using the platform for simple applications works well. As web standards develop further, writing applications for the platform should become easier. Any system this complex will have its problems, and we are not expecting our platform to replace the existing web, but are fairly impressed with the results and consider our work important from the perspective of managing user data},
   2265         www_section = {content centric, ECRS, Freenet, GNUnet, P2P, Peerscape, Persona},
   2266         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/twr-dp2pwa.pdf},
   2267         url = {https://bibliography.gnunet.org},
   2268         author = {Toni Ruottu}
   2269 }
   2270 @conference {continual,
   2271         title = {Differential Privacy Under Continual Observation},
   2272         booktitle = {Proceedings of the 42nd ACM Symposium on Theory of Computing (STOC'10)},
   2273         year = {2010},
   2274         month = {June},
   2275         pages = {715--724},
   2276         url = {https://bibliography.gnunet.org},
   2277         www_section = unsorted,
   2278         author = {Dwork, Cynthia and Naor, Moni and Pitassi, Toniann and Rothblum, Guy N.}
   2279 }
   2280 @article {2010_4,
   2281         title = {On the Difficulties of Disclosure Prevention in Statistical Databases or The Case for Differential Privacy},
   2282         journal = {Journal of Privacy and Confidentiality},
   2283         volume = {2},
   2284         year = {2010},
   2285         pages = {93--107},
   2286         abstract = {In 1977 Tore Dalenius articulated a desideratum for statistical databases: nothing about an individual should be learnable from the database that cannot be learned without access to the database. We give a general impossibility result showing that a natural formalization of Dalenius' goal cannot be achieved if the database is useful. The key obstacle is the side information that may be available to an adversary. Our results hold under very general conditions regarding the database, the notion of privacy violation, and the notion of utility. Contrary to intuition, a variant of the result threatens the privacy even of someone not in the database. This state of affairs motivated the notion of differential privacy [15, 16], a strong ad omnia privacy which, intuitively, captures the increased risk to one's privacy incurred by participating in a database},
   2287         url = {http://research.microsoft.com/apps/pubs/default.aspx?id=135704},
   2288         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DisclousrePrevention2010Dwork.pdf},
   2289         www_section = unsorted,
   2290         author = {Cynthia Dwork and Moni Naor}
   2291 }
   2292 @mastersthesis {vleroythesis,
   2293         title = {Distributing social applications},
   2294         year = {2010},
   2295         month = dec,
   2296         school = {IRISA},
   2297         type = {phd},
   2298         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributingSocialApp2010Leroy.pdf},
   2299         www_section = unsorted,
   2300         url = {https://bibliography.gnunet.org},
   2301         author = {Vincent Leroy}
   2302 }
   2303 @book {2010_5,
   2304         title = {Drac: An Architecture for Anonymous Low-Volume Communications},
   2305         booktitle = {Privacy Enhancing Technologies},
   2306         series = {Lecture Notes in Computer Science},
   2307         volume = {6205},
   2308         year = {2010},
   2309         pages = {202--219},
   2310         publisher = {Springer Berlin Heidelberg},
   2311         organization = {Springer Berlin Heidelberg},
   2312         www_section = {anonymous communication, anonymous IM, anonymous voice, Drac, F2F},
   2313         isbn = {978-3-642-14526-1},
   2314         doi = {10.1007/978-3-642-14527-8_12},
   2315         url = {http://dx.doi.org/10.1007/978-3-642-14527-8_12},
   2316         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/drac-pet2010.pdf},
   2317         author = {Danezis, George and Claudia Diaz and Troncoso, Carmela and Laurie, Ben},
   2318         editor = {Atallah, MikhailJ. and Hopper, Nicholas J}
   2319 }
   2320 @conference {2010_6,
   2321         title = {Efficient DHT attack mitigation through peers' ID distribution},
   2322         booktitle = {HOTP2P'10--International Workshop on Hot Topics in Peer-to-Peer Systems},
   2323         year = {2010},
   2324         month = apr,
   2325         address = {Atlanta, Georgia, USA},
   2326         abstract = {We present a new solution to protect the widely deployed KAD DHT against localized attacks which can take control over DHT entries. We show through measurements that the IDs distribution of the best peers found after a lookup
   2327 process follows a geometric distribution. We then use this result to detect DHT attacks by comparing real peers' ID distributions to the theoretical one thanks to the Kullback-Leibler divergence. When an attack is detected, we propose countermeasures that progressively remove suspicious peers from the list of possible contacts to provide a safe DHT access. Evaluations show that our
   2328 method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead},
   2329         www_section = {attack detection, attack mitigation, distributed hash table, IDs distribution, KAD, Sybil attack},
   2330         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf},
   2331         url = {https://bibliography.gnunet.org},
   2332         author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}
   2333 }
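%
% The abstract above detects localized KAD attacks by comparing the observed distribution of
% returned peer IDs with a theoretical geometric distribution via the Kullback-Leibler
% divergence. The toy Python sketch below only illustrates that comparison; the geometric
% parameter, the "observed" frequencies and the decision threshold are invented for the
% example and are not the values derived in the paper.
%
% import math
%
% def kl_divergence(observed, expected):
%     # D(P || Q) = sum_i P(i) * log(P(i) / Q(i)) over a common support
%     return sum(p * math.log(p / q) for p, q in zip(observed, expected) if p > 0)
%
% # hypothetical geometric model for the ranks of the 10 closest peers found by a lookup
% GEOM_P = 0.5
% weights = [GEOM_P * (1 - GEOM_P) ** i for i in range(10)]
% expected = [w / sum(weights) for w in weights]
%
% # hypothetical measurement showing an unnatural concentration on a few peers
% observed = [0.02] * 9 + [0.82]
%
% THRESHOLD = 0.5   # hypothetical cut-off between normal and suspicious lookups
% if kl_divergence(observed, expected) > THRESHOLD:
%     print("lookup looks attacked; filter the suspicious peers")
% else:
%     print("lookup looks normal")
%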
   2334 @conference {Koch:2010:EPL:1827418.1827440,
   2335         title = {Event processing for large-scale distributed games},
   2336         booktitle = {Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems},
   2337         series = {DEBS '10},
   2338         year = {2010},
   2339         pages = {103--104},
   2340         publisher = {ACM},
   2341         organization = {ACM},
   2342         address = {New York, NY, USA},
   2343         abstract = {Novel peer-to-peer-based multiplayer online games are instantiated in an ad-hoc manner without the support of dedicated infrastructure and maintain their state in a distributed manner. Although their employed communication paradigms provide efficient access to sections of distributed state, such communication fails if the participants need to access large subsets of the application state in order to detect high-level situations. We propose a demonstration that shows how multiplayer online games can benefit from using publish/subscribe communication and complex event processing alongside their traditional communication paradigm},
   2344         www_section = {content-based publish/subscribe, distributed complex event processing, multi-player online game},
   2345         isbn = {978-1-60558-927-5},
   2346         doi = {http://doi.acm.org/10.1145/1827418.1827440},
   2347         url = {http://doi.acm.org/10.1145/1827418.1827440},
   2348         author = {Gerald G. Koch and Tariq, Muhammad Adnan and Boris Koldehofe and Kurt Rothermel}
   2349 }
   2350 @conference {DBLP:conf/middleware/BertierFGKL10,
   2351         title = {The Gossple Anonymous Social Network},
   2352         booktitle = {Proceedings of the ACM/IFIP/USENIX 11th International Conference on Middleware},
   2353         year = {2010},
   2354         pages = {191--211},
   2355         publisher = {ACM/IFIP/USENIX},
   2356         organization = {ACM/IFIP/USENIX},
   2357         abstract = {While social networks provide news from old buddies, you can learn a lot more from people you do not know, but with whom you share many interests. We show in this paper how to build a network of anonymous social acquaintances using a gossip protocol we call Gossple, and how to leverage such a network to enhance navigation within Web 2.0 collaborative applications, {\`a} la LastFM and Delicious. Gossple nodes (users) periodically gossip digests of their interest profiles and compute their distances (in terms of interest) with respect to other nodes. This is achieved with little bandwidth and storage, fast convergence, and without revealing which profile is associated with which user. We evaluate Gossple on real traces from various Web 2.0 applications with hundreds of PlanetLab hosts and thousands of simulated nodes},
   2358         www_section = {gossple, social networks},
   2359         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossple2010Bertier.pdf},
   2360         url = {https://bibliography.gnunet.org},
   2361         author = {Marin Bertier and Davide Frey and Rachid Guerraoui and Anne-Marie Kermarrec and Vincent Leroy}
   2362 }
   2363 @article { duminuco:hierarchical,
   2364         title = {Hierarchical codes: A flexible trade-off for erasure codes in peer-to-peer storage systems},
   2365         journal = {Peer-to-Peer Networking and Applications},
   2366         volume = {3},
   2367         year = {2010},
   2368         month = mar,
   2369         pages = {52--66},
   2370         abstract = {Redundancy is the basic technique to provide reliability in storage systems consisting of multiple components. A redundancy scheme defines how the redundant data are produced and maintained. The simplest redundancy scheme is replication, which however suffers from storage inefficiency. Another approach is erasure coding, which provides the same level of reliability as replication using a significantly smaller amount of storage. When redundant data are lost, they need to be replaced. While replacing replicated data consists in a simple copy, it becomes a complex operation with erasure codes: new data are produced performing a coding over some other available data. The amount of data to be read and coded is d times larger than the amount of data produced, where d, called repair degree, is larger than 1 and depends on the structure of the code. This implies that coding has a larger computational and I/O cost, which, for distributed storage systems, translates into increased network traffic. Participants of Peer-to-Peer systems often have ample storage and CPU power, but their network bandwidth may be limited. For these reasons existing coding techniques are not suitable for P2P storage. This work explores the design space between replication and the existing erasure codes. We propose and evaluate a new class of erasure codes, called Hierarchical Codes, which allows to reduce the network traffic due to maintenance without losing the benefits given by traditional erasure codes},
   2371         www_section = {dependability, erasure codes, peer-to-peer networking, reliability, storage},
   2372         doi = {10.1007/s12083-009-0044-8},
   2373         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Duminuco\%20\%26\%20Biersack\%20-\%20Hierarchical\%20Codes.pdf},
   2374         url = {https://bibliography.gnunet.org},
   2375         author = {Alessandro Duminuco and E W Biersack}
   2376 }
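%
% The abstract above defines the repair degree d as the factor by which the data read for a
% repair exceeds the data produced. A back-of-the-envelope comparison, with all numbers
% invented purely for illustration, of the maintenance traffic caused by one lost fragment:
%
% F = 64                       # fragment size in MiB (made up)
% K = 8                        # data fragments of a classical (n, k) erasure code
%
% traffic_replication = 1 * F  # replication: copy one replica, d = 1
% traffic_classic = K * F      # classical erasure code: read k fragments to rebuild one, d = k
% d_hier = 3                   # hypothetical smaller repair degree of a hierarchical code
% traffic_hierarchical = d_hier * F
%
% print(traffic_replication, traffic_classic, traffic_hierarchical)   # 64 512 192 (MiB)
%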
   2377 @conference {2010_7,
   2378         title = {How Accurately Can One's Interests Be Inferred from Friends?},
   2379         booktitle = {Proceedings of the 19th International Conference on World Wide Web},
   2380         year = {2010},
   2381         publisher = {ACM},
   2382         organization = {ACM},
   2383         address = {New York, NY, USA},
   2384         abstract = {Search and recommendation systems must effectively model user interests in order to provide personalized results. The proliferation of social software makes social network an increasingly important source for user interest modeling,
   2385 because of the social influence and correlation among friends. However, there are large variations in people's contribution of social content. Therefore, it is impractical to accurately model interests for all users. As a result, applications need to decide whether to utilize a user interest model based on its
   2386 accuracy. To address this challenge, we present a study on the accuracy of user interests inferred from three types of social content: social bookmarking, file sharing, and electronic communication, in an organizational social network within a large-scale enterprise. First, we demonstrate that combining different types of social content to infer user interests
   2387 outperforms methods that use only one type of social content. Second, we present a technique to predict the inference accuracy based on easily observed network characteristics, including user activeness, network in-degree, out-degree, and betweenness centrality},
   2388         www_section = {accuracy, social networks, user modeling},
   2389         isbn = {978-1-60558-799-8},
   2390         doi = {10.1145/1772690.1772875},
   2391         url = {http://doi.acm.org/10.1145/1772690.1772875},
   2392         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/InterestsInference2010Wen.pdf},
   2393         author = {Wen, Zhen and Lin, Ching-Yung}
   2394 }
   2395 @article {tissec-latency-leak,
   2396         title = {How Much Anonymity does Network Latency Leak?},
   2397         journal = {ACM Transactions on Information and System Security},
   2398         year = {2010},
   2399         month = jan,
   2400         pages = {82--91},
   2401         abstract = {Low-latency anonymity systems such as Tor, AN.ON, Crowds, and Anonymizer.com aim to provide anonymous connections that are both untraceable by "local" adversaries who control only a few machines, and have low enough delay to support anonymous use of network services like web browsing and remote login. One consequence of these goals is that these services leak some information about the network latency between the sender and one or more nodes in the system. This paper reports on three experiments that partially measure the extent to which such leakage can compromise anonymity. First, using a public dataset of pairwise round-trip times (RTTs) between 2000 Internet hosts, we estimate that on average, knowing the network location of host A and the RTT to host B leaks 3.64 bits of information about the network location of B. Second, we describe an attack that allows a pair of colluding web sites to predict, based on local timing information and with no additional resources, whether two connections from the same Tor exit node are using the same circuit with 17\% equal error rate. Finally, we describe an attack that allows a malicious website, with access to a network coordinate system and one corrupted Tor router, to recover roughly 6.8 bits of network location per hour},
   2402         www_section = {anonymity, latency, Tor},
   2403         isbn = {978-1-59593-703-2},
   2404         doi = {10.1145/1315245.1315257},
   2405         url = {http://portal.acm.org/citation.cfm?id=1315245.1315257},
   2406         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tissec-latency-leak.pdf},
   2407         author = {Nicholas J. Hopper and Eugene Y. Vasserman and Eric Chan-Tin}
   2408 }
   2409 @conference {DBLP:conf/tridentcom/NguyenRKFMB10,
   2410         title = {How to Build Complex, Large-Scale Emulated Networks},
   2411         booktitle = {TRIDENTCOM},
   2412         year = {2010},
   2413         pages = {3--18},
   2414         www_section = {autonetkit, emulation, netkit, network, testbed, virtualization},
   2415         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AutoNetkit_0.pdf},
   2416         url = {https://bibliography.gnunet.org},
   2417         author = {Hung X. Nguyen and Roughan, Matthew and Knight, Simon and Nick Falkner and Maennel, Olaf and Randy Bush}
   2418 }
   2419 @mastersthesis {2010_8,
   2420         title = {Incentive-driven QoS in peer-to-peer overlays},
   2421         volume = {PhD},
   2422         year = {2010},
   2423         month = may,
   2424         pages = {0--209},
   2425         school = {University College London},
   2426         address = {London},
   2427         abstract = {A well known problem in peer-to-peer overlays is that no single entity has control over the software, hardware and configuration of peers. Thus, each peer can selfishly adapt its behaviour to maximise its benefit from the overlay. This thesis is concerned with the modelling and design of incentive mechanisms for QoS-overlays: resource allocation protocols that provide strategic peers with participation incentives, while at the same time optimising the performance of the peer-to-peer distribution overlay. The contributions of this thesis are as follows. First, we present PledgeRoute, a novel contribution accounting system that can be used, along with a set of reciprocity policies, as an incentive mechanism to encourage peers to contribute resources even when users are not actively consuming overlay services. This mechanism uses a decentralised credit network, is resilient to sybil attacks, and allows peers to achieve time and space deferred contribution reciprocity. Then, we present a novel, QoS-aware resource allocation model based on Vickrey auctions that uses PledgeRoute as a substrate. It acts as an incentive mechanism by providing efficient overlay construction, while at the same time allocating increasing service quality to those peers that contribute more to the network. The model is then applied to lag-sensitive chunk swarming, and some of its properties are explored for different peer delay distributions. When considering QoS overlays deployed over the best-effort Internet, the quality received by a client cannot be adjudicated completely to either its serving peer or the intervening network between them. By drawing parallels between this situation and well-known hidden action situations in microeconomics, we propose a novel scheme to ensure adherence to advertised QoS levels. We then apply it to delay-sensitive chunk distribution overlays and present the optimal contract payments required, along with a method for QoS contract enforcement through reciprocative strategies. We also present a probabilistic model for application-layer delay as a function of the prevailing network conditions. Finally, we address the incentives of managed overlays, and the prediction of their behaviour. We propose two novel models of multihoming managed overlay incentives in which overlays can freely allocate their traffic flows between different ISPs. One is obtained by optimising an overlay utility function with desired properties, while the other is designed for data-driven least-squares fitting of the cross elasticity of demand. This last model is then used to solve for ISP profit maximisation},
   2428         www_section = {BitTorrent, Freeloading, game theory, incentives, PeerLive, prices, QoS},
   2429         url = {http://eprints.ucl.ac.uk/19490/},
   2430         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/19490.pdf},
   2431         author = {Raul Leonardo Landa Gamiochipi}
   2432 }
   2433 @article {2010_9,
   2434         title = {Malugo: A peer-to-peer storage system},
   2435         year = {2010},
   2436         abstract = {We consider the problem of routing locality in peer-to-peer storage systems where peers store and exchange data among themselves. With the global information, peers will take the data locality into consideration when they implement their replication mechanisms to keep a number of file replicas all over the systems. In this paper, we mainly propose a peer-to-peer storage system--Malugo. Algorithms for the implementation of the peers' locating and file operation processes are also presented. Simulation results show that the proposed system successfully constructs an efficient and stable peer-to-peer storage environment with considerations of data and routing locality among peers},
   2437         www_section = {distributed storage, Malugo, peer-to-peer storage},
   2438         journal = {unknown},
   2439         doi = {10.1504/IJAHUC.2010.032995},
   2440         url = {http://www.ingentaconnect.com/content/ind/ijahuc/2010/00000005/00000004/art00002;jsessionid=kcpun0o76hoe.alexandra},
   2441         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Malugo.pdf},
   2442         author = {Chan, Yu-Wei and Ho, Tsung-Hsuan and Shih, Po-Chi and Chung, Yeh-Ching}
   2443 }
   2444 @conference {DBLP:conf/tridentcom/AlbrechtH10,
   2445         title = {Managing Distributed Applications Using Gush},
   2446         booktitle = {TRIDENTCOM},
   2447         year = {2010},
   2448         pages = {401--411},
   2449         www_section = {distributed applications, emulation, GENI, PlanetLab, testbed},
   2450         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gush.pdf},
   2451         url = {https://bibliography.gnunet.org},
   2452         author = {Jeannie R. Albrecht and Danny Yuxing Huang}
   2453 }
   2454 @conference {DBLP:conf/tridentcom/PeralaPML10,
   2455         title = {A Novel Testbed for P2P Networks},
   2456         booktitle = {TRIDENTCOM},
   2457         year = {2010},
   2458         pages = {69--83},
   2459         www_section = {emulation, P2P, testbed},
   2460         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/A_Novel_Testbed_for_P2P_Networks.pdf},
   2461         url = {https://bibliography.gnunet.org},
   2462         author = {Pekka H. J. Per{\"a}l{\"a} and Jori P. Paananen and Milton Mukhopadhyay and Jukka-Pekka Laulajainen}
   2463 }
   2464 @conference {Locher:2010:PKN:2018057.2018085,
   2465         title = {Poisoning the Kad network},
   2466         booktitle = {ICDCN'10--Proceedings of the 11th International Conference on Distributed Computing and Networking},
   2467         series = {ICDCN'10},
   2468         year = {2010},
   2469         month = jan,
   2470         pages = {195--206},
   2471         publisher = {Springer-Verlag},
   2472         organization = {Springer-Verlag},
   2473         address = {Kolkata, India},
   2474         abstract = {Since the demise of the Overnet network, the Kad network has become not only the most popular but also the only widely used peer-to-peer system based on a distributed hash table. It is likely that its user base will continue to grow in numbers over the next few years as, unlike the eDonkey network, it does not depend on central servers, which increases scalability and reliability. Moreover, the Kad network is more efficient than unstructured systems such as Gnutella. However, we show that today's Kad network can be attacked in several ways by carrying out several (well-known) attacks on the Kad network. The presented attacks could be used either to hamper the correct functioning of the network itself, to censor contents, or to harm other entities in the Internet not participating in the Kad network such as ordinary web servers. While there are simple heuristics to reduce the impact of some of the attacks, we believe that the presented attacks cannot be thwarted easily in any fully decentralized peer-to-peer system without some kind of a centralized certification and verification authority},
   2475         www_section = {distributed hash table, KAD},
   2476         isbn = {3-642-11321-4, 978-3-642-11321-5},
   2477         doi = {http://dx.doi.org/10.1007/978-3-642-11322-2_22},
   2478         url = {http://dl.acm.org/citation.cfm?id=2018057.2018085},
   2479         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCN\%2710\%20-\%20Poisoning\%20the\%20Kad\%20Network.pdf},
   2480         author = {Thomas Locher and Mysicka, David and Stefan Schmid and Roger Wattenhofer}
   2481 }
   2482 @conference {FessiIPTComm2010,
   2483         title = {Pr2-P2PSIP: Privacy Preserving P2P Signaling for VoIP and IM},
   2484         booktitle = {Principles, Systems and Applications of IP Telecommunications (IPTComm), Munich},
   2485         year = {2010},
   2486         month = {August},
   2487         pages = {141--152},
   2488         address = {Munich, Germany},
   2489         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fessi_iptcomm_2010.pdf},
   2490         url = {https://bibliography.gnunet.org},
   2491         www_section = unsorted,
   2492         author = {Fessi, Ali and Nathan S Evans and Heiko Niedermayer and Ralph Holz}
   2493 }
   2494 @article {Isdal:2010:PPD:1851275.1851198,
   2495         title = {Privacy-preserving P2P data sharing with OneSwarm},
   2496         journal = {SIGCOMM Comput. Commun. Rev},
   2497         volume = {40},
   2498         number = {4},
   2499         year = {2010},
   2500         pages = {111--122},
   2501         publisher = {ACM},
   2502         address = {New York, NY, USA},
   2503         www_section = {anonymity, OneSwarm, p2p network},
   2504         issn = {0146-4833},
   2505         doi = {10.1145/1851275.1851198},
   2506         url = {http://doi.acm.org/10.1145/1851275.1851198},
   2507         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oneswarm_SIGCOMM.pdf},
   2508         author = {Isdal, Tomas and Piatek, Michael and Krishnamurthy, Arvind and Anderson, Thomas}
   2509 }
   2510 @article {1667071,
   2511         title = {Privacy-preserving similarity-based text retrieval},
   2512         journal = {ACM Trans. Internet Technol},
   2513         volume = {10},
   2514         number = {1},
   2515         year = {2010},
   2516         pages = {1--39},
   2517         publisher = {ACM},
   2518         address = {New York, NY, USA},
   2519         abstract = {Users of online services are increasingly wary that their activities could disclose confidential information on their business or personal activities. It would be desirable for an online document service to perform text retrieval for users, while protecting the privacy of their activities. In this article, we introduce a privacy-preserving, similarity-based text retrieval scheme that (a) prevents the server from accurately reconstructing the term composition of queries and documents, and (b) anonymizes the search results from unauthorized observers. At the same time, our scheme preserves the relevance-ranking of the search server, and enables accounting of the number of documents that each user opens. The effectiveness of the scheme is verified empirically with two real text corpora},
   2520         www_section = {keywords, privacy, search, text mining},
   2521         issn = {1533-5399},
   2522         doi = {http://doi.acm.org/10.1145/1667067.1667071},
   2523         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/privacy_preserving_similarity.pdf},
   2524         url = {https://bibliography.gnunet.org},
   2525         author = {Pang, Hweehwa and Shen, Jialie and Krishnan, Ramayya}
   2526 }
   2527 @conference {2010_10,
   2528         title = {Private Record Matching Using Differential Privacy},
   2529         booktitle = {Proceedings of the 13th International Conference on Extending Database Technology},
   2530         year = {2010},
   2531         publisher = {ACM},
   2532         organization = {ACM},
   2533         address = {New York, NY, USA},
   2534         abstract = {Private matching between datasets owned by distinct parties is a challenging problem with several applications. Private matching allows two parties to identify the records that are close to each other according to some distance functions, such that no additional information other than the join result is disclosed to any party. Private matching can be solved securely and accurately using secure multi-party computation (SMC) techniques, but such an approach is prohibitively expensive in practice. Previous work proposed the release of sanitized versions of the sensitive datasets which allows blocking, i.e., filtering out sub-sets of records that cannot be part of the join result. This way, SMC is applied only to a small fraction of record pairs, reducing the matching cost to acceptable levels. The blocking step is essential for the privacy, accuracy and efficiency of matching. However, the state-of-the-art focuses on sanitization based on k-anonymity, which does not provide sufficient privacy. We propose an alternative design centered on differential privacy, a novel paradigm that provides strong privacy guarantees. The realization of the new model presents difficult challenges, such as the evaluation of distance-based matching conditions with the help of only a statistical queries interface. Specialized versions of data indexing structures (e.g., kd-trees) also need to be devised, in order to comply with differential privacy. Experiments conducted on the real-world Census-income dataset show that, although our methods provide strong privacy, their effectiveness in reducing matching cost is not far from that of k-anonymity based counterparts},
   2535         www_section = {Differential Privacy, privacy, record matching, security},
   2536         isbn = {978-1-60558-945-9},
   2537         doi = {10.1145/1739041.1739059},
   2538         url = {http://doi.acm.org/10.1145/1739041.1739059},
   2539         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecordMatching2010Inan.pdf},
   2540         author = {Inan, Ali and Kantarcioglu, Murat and Ghinita, Gabriel and Bertino, Elisa}
   2541 }
   2542 @conference {1827425,
   2543         title = {Providing basic security mechanisms in broker-less publish/subscribe systems},
   2544         booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems},
   2545         year = {2010},
   2546         pages = {38--49},
   2547         publisher = {ACM},
   2548         organization = {ACM},
   2549         address = {New York, NY, USA},
   2550         abstract = {The provisioning of basic security mechanisms such as authentication and confidentiality is highly challenging in a content-based publish/subscribe system. Authentication of publishers and subscribers is difficult to achieve due to the loose coupling of publishers and subscribers. Similarly, confidentiality of events and subscriptions conflicts with content-based routing. In particular, content-based approaches in broker-less environments do not address confidentiality at all. This paper presents a novel approach to provide confidentiality and authentication in a broker-less content-based publish-subscribe system. The authentication of publishers and subscribers as well as confidentiality of events is ensured, by adapting the pairing-based cryptography mechanisms, to the needs of a publish/subscribe system. Furthermore, an algorithm to cluster subscribers according to their subscriptions preserves a weak notion of subscription confidentiality. Our approach provides fine grained key management and the cost for encryption, decryption and routing is in the order of subscribed attributes. Moreover, the simulation results verify that supporting security is affordable with respect to the cost for overlay construction and event dissemination latencies, thus preserving scalability of the system},
   2551         www_section = {P2P, publish/subscribe},
   2552         isbn = {978-1-60558-927-5},
   2553         doi = {10.1145/1827418.1827425},
   2554         url = {http://portal.acm.org/citation.cfm?id=1827418.1827425\&coll=portal\&dl=GUIDE$\#$},
   2555         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DIP_2872.pdf},
   2556         author = {Tariq, Muhammad Adnan and Boris Koldehofe and Altaweel, Ala and Kurt Rothermel}
   2557 }
   2558 @article {1672334,
   2559         title = {Reconnecting the internet with ariba: self-organizing provisioning of end-to-end connectivity in heterogeneous networks},
   2560         journal = {SIGCOMM Comput. Commun. Rev},
   2561         volume = {40},
   2562         number = {1},
   2563         year = {2010},
   2564         pages = {131--132},
   2565         publisher = {ACM},
   2566         address = {New York, NY, USA},
   2567         abstract = {End-to-End connectivity in today's Internet can no longer be taken for granted. Middleboxes, mobility, and protocol heterogeneity complicate application development and often result in application-specific solutions. In our demo we present ariba: an overlay-based approach to handle such network challenges and to provide consistent homogeneous network primitives in order to ease application and service development},
   2568         www_section = {heterogeneity, overlay networks, P2P},
   2569         issn = {0146-4833},
   2570         doi = {10.1145/1672308.1672334},
   2571         url = {http://portal.acm.org/citation.cfm?doid=1672308.1672334$\#$},
   2572         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p131-v40n1n-huebschA.pdf},
   2573         author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Sebastian Mies and Roland Bless and Oliver Waldhorst and Martina Zitterbart}
   2574 }
   2575 @conference {2010_11,
   2576         title = {On Runtime Adaptation of Application-Layer Multicast Protocol Parameters},
   2577         booktitle = {Proceedings of Networked Services and Applications -- Engineering, Control and Management (EUNICE)},
   2578         series = {Lecture Notes in Computer Science},
   2579         year = {2010},
   2580         note = {to appear},
   2581         publisher = {Springer},
   2582         organization = {Springer},
   2583         address = {Trondheim, Norway},
   2584         url = {http://www.tm.uni-karlsruhe.de/itm/WebMan/view.php?view=publikationen_detail\&id=389\&lang=en},
   2585         www_section = unsorted,
   2586         author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}
   2587 }
   2588 @conference {2010_12,
   2589         title = {Scalable Application-Layer Multicast Simulations with OverSim},
   2590         booktitle = {7th Annual IEEE Consumer Communications \& Networking Conference},
   2591         year = {2010},
   2592         abstract = {Application-Layer Multicast has become a promising class of protocols since IP Multicast has not found wide area deployment in the Internet. Developing such protocols requires in-depth analysis of their properties even with large numbers of participants---a characteristic which is at best hard to achieve in real network experiments. Several well-known simulation frameworks have been developed and used in recent years, but none has proved to fit the requirements for analyzing large-scale application-layer networks. In this paper we propose the OverSim framework as a promising simulation environment for scalable Application-Layer Multicast research. We show that OverSim is able to manage even overlays with several thousand participants in short time while consuming comparably little memory. We compare the framework's runtime properties with the two exemplary Application-Layer Multicast protocols Scribe and NICE. The results show that both simulation time and memory consumption grow linearly with the number of nodes in highly feasible dimensions},
   2593         www_section = {multicast, NICE, OverSim, Scribe},
   2594         url = {https://bibliography.gnunet.org},
   2595         author = {Stephan Krause and H{\"u}bsch, Christian}
   2596 }
   2597 @conference {Burkhart:2010:SPA:1929820.1929840,
   2598         title = {SEPIA: privacy-preserving aggregation of multi-domain network events and statistics},
   2599         booktitle = {Proceedings of the 19th USENIX conference on Security},
   2600         series = {USENIX Security'10},
   2601         year = {2010},
   2602         month = aug,
   2603         pages = {15--15},
   2604         publisher = {USENIX Association},
   2605         organization = {USENIX Association},
   2606         address = {Washington, DC, USA},
   2607         abstract = {Secure multiparty computation (MPC) allows joint privacy-preserving computations on data of multiple parties. Although MPC has been studied substantially, building solutions that are practical in terms of computation and communication cost is still a major challenge. In this paper, we investigate the practical usefulness of MPC for multi-domain network security and monitoring. We first optimize MPC comparison operations for processing high volume data in near real-time. We then design privacy-preserving protocols for event correlation and aggregation of network traffic statistics, such as addition of volume metrics, computation of feature entropy, and distinct item count. Optimizing performance of parallel invocations, we implement our protocols along with a complete set of basic operations in a library called SEPIA. We evaluate the running time and bandwidth requirements of our protocols in realistic settings on a local cluster as well as on PlanetLab and show that they work in near real-time for up to 140 input providers and 9 computation nodes. Compared to implementations using existing general-purpose MPC frameworks, our protocols are significantly faster, requiring, for example, 3 minutes for a task that takes 2 days with general-purpose frameworks. This improvement paves the way for new applications of MPC in the area of networking. Finally, we run SEPIA's protocols on real traffic traces of 17 networks and show how they provide new possibilities for distributed troubleshooting and early anomaly detection},
   2608         www_section = {privacy, secure multi-party computation, SMC},
   2609         isbn = {888-7-6666-5555-4},
   2610         url = {http://dl.acm.org/citation.cfm?id=1929820.1929840},
   2611         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/USENIX\%20Security\%2710\%20-\%20SEPIA.pdf},
   2612         author = {Burkhart, Martin and Strasser, Mario and Many, Dilip and Dimitropoulos, Xenofontas}
   2613 }
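%
% Editor's annotation (not part of the cited work): the SEPIA entry above
% aggregates multi-domain network statistics with secure multiparty
% computation. The commented-out Python sketch below illustrates the most
% basic building block, additive secret sharing of inputs over a public
% prime modulus, assuming honest-but-curious parties; the names `share` and
% `reconstruct_sum` are illustrative and are not SEPIA's API.
%
%   import secrets
%
%   P = 2**61 - 1  # public prime modulus; all arithmetic is done mod P
%
%   def share(value, n_parties):
%       """Split `value` into n additive shares that sum to `value` mod P."""
%       shares = [secrets.randbelow(P) for _ in range(n_parties - 1)]
%       shares.append((value - sum(shares)) % P)
%       return shares
%
%   def reconstruct_sum(local_sums):
%       """Combine each party's local sum of received shares into the total."""
%       return sum(local_sums) % P
%
%   # Three input providers privately add their traffic counters.
%   inputs = [120, 45, 300]
%   per_input = [share(v, 3) for v in inputs]          # one row of shares per input
%   local = [sum(col) % P for col in zip(*per_input)]  # each party sums its column
%   assert reconstruct_sum(local) == sum(inputs)
%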
   2614 @conference {Marks2010a,
   2615         title = {Unleashing Tor, BitTorrent \& Co.: How to Relieve TCP Deficiencies in Overlays},
   2616         booktitle = {LCN 2010: Proceedings of the 35th IEEE Conference on Local Computer Networks},
   2617         year = {2010},
   2618         url = {https://bibliography.gnunet.org},
   2619         www_section = unsorted,
   2620         author = {Daniel Marks and Florian Tschorsch and Bjoern Scheuermann}
   2621 }
   2622 @conference {2010_13,
   2623         title = {User-perceived Performance of the NICE Application Layer Multicast Protocol in Large and Highly Dynamic Groups},
   2624         booktitle = {Proceedings of 15th International GI/ITG Conference on "Measurement, Modelling and Evaluation of Computing Systems"},
   2625         year = {2010},
   2626         note = {Best Paper Award},
   2627         month = jan,
   2628         pages = {62--77},
   2629         publisher = {Springer Berlin, Heidelberg},
   2630         organization = {Springer Berlin, Heidelberg},
   2631         address = {Essen, Germany},
   2632         abstract = {The presentation of a landmark paper by Chu et al. at SIGMETRICS 2000 introduced application layer multicast (ALM) as a completely new area of network research. Many researchers have since proposed ALM protocols, and have shown that these protocols only put a small burden on the network in terms of link-stress and -stretch. However, since the network is typically not a bottleneck, user acceptance remains the limiting factor for the deployment of ALM. In this paper we present an in-depth study of the user-perceived performance of the NICE ALM protocol. We use the OverSim simulation framework to evaluate delay experienced by a user and bandwidth consumption on the user's access link in large multicast groups and under aggressive churn models. Our major results are (1) latencies grow moderately with an increasing number of nodes as clusters get optimized, (2) join delays get optimized over time, and (3) despite being a tree-dissemination protocol NICE handles churn surprisingly well when adjusting heartbeat intervals accordingly. We conclude that NICE comes up to the user's expectations even for large groups and under high churn.
   2633 This work was partially funded as part of the Spontaneous Virtual Networks (SpoVNet) project by the Landesstiftung Baden-W{\"u}rttemberg within the BW-FIT program and as part of the Young Investigator Group Controlling Heterogeneous and Dynamic Mobile Grid and Peer-to-Peer Systems (CoMoGriP) by the Concept for the Future of Karlsruhe Institute of Technology (KIT) within the framework of the German Excellence Initiative},
   2634         isbn = {978-3-642-12103-6},
   2635         doi = {10.1007/978-3-642-12104-3},
   2636         url = {http://www.springerlink.com/content/t6k421560103540n/},
   2637         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/User-Perceived\%20Performance\%20of\%20the\%20NICE\%20Application\%20Layer\%20Multicast\%20Protocol\%20in\%20Large\%20and\%20Highly\%20Dynamic\%20Groups_1.pdf},
   2638         www_section = unsorted,
   2639         author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}
   2640 }
   2641 @conference {2010_14,
   2642         title = {Using Legacy Applications in Future Heterogeneous Networks with ariba},
   2643         booktitle = {Proceedings of IEEE INFOCOM},
   2644         year = {2010},
   2645         note = {Demo},
   2646         address = {San Diego, CA, USA},
   2647         url = {https://bibliography.gnunet.org},
   2648         www_section = unsorted,
   2649         author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Sebastian Mies and Roland Bless and Oliver Waldhorst and Martina Zitterbart}
   2650 }
   2651 @conference {DBLP:conf/ccs/EdmanS09,
   2652         title = {AS-awareness in Tor path selection},
   2653         booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009},
   2654         year = {2009},
   2655         pages = {380--389},
   2656         publisher = {ACM},
   2657         organization = {ACM},
   2658         abstract = {Tor is an anonymous communications network with thousands of router nodes worldwide. An intuition reflected in much of the literature on anonymous communications is that, as an anonymity network grows, it becomes more secure against a given observer because the observer will see less of the network. In particular, as the Tor network grows from volunteers operating relays all over the world, it becomes less and less likely for a single autonomous system (AS) to be able to observe both ends of an anonymous connection. Yet, as the network continues to grow significantly, no analysis has been done to determine if this intuition is correct. Further, modifications to Tor's path selection algorithm to help clients avoid an AS-level observer have not been proposed and analyzed.
   2659 
   2660 Five years ago a previous study examined the AS-level threat against client and destination addresses chosen a priori to be likely or interesting to examine. Using an AS-level path inference algorithm with improved accuracy, more extensive Internet routing data, and, most importantly, a model of typical Tor client AS-level sources and destinations based on data gathered from the live network, we demonstrate that the threat of a single AS observing both ends of an anonymous Tor connection is greater than previously thought. We look at the growth of the Tor network over the past five years and show that its explosive growth has had only a small impact on the network's robustness against an AS-level attacker. Finally, we propose and evaluate the effectiveness of some simple, AS-aware path selection algorithms that avoid the computational overhead imposed by full AS-level path inference algorithms. Our results indicate that a novel heuristic we propose is more effective against an AS-level observer than other commonly proposed heuristics for improving location diversity in path selection},
   2661         www_section = {anonymity, autonomous systems, privacy, Tor},
   2662         isbn = {978-1-60558-894-0},
   2663         doi = {10.1145/1653662.1653708},
   2664         url = {http://portal.acm.org/citation.cfm?id=1653662.1653708},
   2665         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanS09.pdf},
   2666         author = {Matthew Edman and Paul Syverson},
   2667         editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}
   2668 }
   2669 @conference {Attrapadung:2009:AES:1696791.1696811,
   2670         title = {Attribute-Based Encryption Supporting Direct/Indirect Revocation Modes},
   2671         booktitle = {Proceedings of the 12th IMA International Conference on Cryptography and Coding},
   2672         series = {Cryptography and Coding '09},
   2673         year = {2009},
   2674         month = dec,
   2675         pages = {278--300},
   2676         publisher = {Springer-Verlag},
   2677         organization = {Springer-Verlag},
   2678         address = {Cirencester, UK},
   2679         abstract = {Attribute-based encryption (ABE) enables an access control mechanism over encrypted data by specifying access policies among private keys and ciphertexts. In this paper, we focus on ABE that supports revocation. Currently, there are two available revocable ABE schemes in the literature. Their revocation mechanisms, however, differ in the sense that they can be considered as direct and indirect methods. \emph{Direct revocation} enforces revocation directly by the sender who specifies the revocation list while encrypting. \emph{Indirect revocation} enforces revocation by the key authority who releases a key update material periodically in such a way that only non-revoked users can update their keys (hence, revoked users' keys are implicitly rendered useless). An advantage of the indirect method over the direct one is that it does not require senders to know the revocation list. In contrast, an advantage of the direct method over the other is that it does not involve key update phase for all non-revoked users interacting with the key authority. In this paper, we present the first \emph{Hybrid Revocable ABE} scheme that allows senders to select on-the-fly when encrypting whether to use either direct or indirect revocation mode; therefore, it combines best advantages from both methods},
   2680         isbn = {978-3-642-10867-9},
   2681         doi = {http://dx.doi.org/10.1007/978-3-642-10868-6_17},
   2682         url = {http://dx.doi.org/10.1007/978-3-642-10868-6_17},
   2683         www_section = unsorted,
   2684         author = {Attrapadung, Nuttapong and Imai, Hideki}
   2685 }
   2686 @conference {DBLP:conf/ccs/TroncosoD09,
   2687         title = {The bayesian traffic analysis of mix networks},
   2688         booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009},
   2689         year = {2009},
   2690         pages = {369--379},
   2691         publisher = {ACM},
   2692         organization = {ACM},
   2693         abstract = {This work casts the traffic analysis of anonymity systems, and in particular mix networks, in the context of Bayesian inference. A generative probabilistic model of mix network architectures is presented that incorporates a number of attack techniques in the traffic analysis literature. We use the model to build a Markov Chain Monte Carlo inference engine that calculates the probabilities of who is talking to whom given an observation of network traces. We provide a thorough evaluation of its correctness and performance, and confirm that mix networks with realistic parameters are secure. This approach enables us to apply established information theoretic anonymity metrics on complex mix networks, and extract information from anonymised traffic traces optimally},
   2694         www_section = {anonymity, Markov chain, traffic analysis},
   2695         isbn = {978-1-60558-894-0},
   2696         doi = {10.1145/1653662.1653707},
   2697         url = {http://portal.acm.org/citation.cfm?id=1653662.1653707},
   2698         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TroncosoD09.pdf},
   2699         author = {Carmela Troncoso and George Danezis},
   2700         editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}
   2701 }
   2702 @conference {1659021,
   2703         title = {Bloom filters and overlays for routing in pocket switched networks},
   2704         booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student workshop on Emerging networking experiments and technologies},
   2705         year = {2009},
   2706         pages = {43--44},
   2707         publisher = {ACM},
   2708         organization = {ACM},
   2709         address = {New York, NY, USA},
   2710         abstract = {Pocket Switched Networks (PSN) [3] have become a promising approach for providing communication between scarcely connected human-carried devices. Such devices, e.g. mobile phones or sensor nodes, are exposed to human mobility and can therewith leverage inter-human contacts for store-and-forward routing. Efficiently routing in such delay tolerant networks is complex due to incomplete knowledge about the network, and high dynamics of the network. In this work we want to develop an extension of Bloom filters for resource-efficient routing in pocket switched networks. Furthermore, we argue that PSNs may become densely populated in special situations. We want to exploit such situations to perform collaborative calculations of forwarding-decisions. In this paper we present a simple scheme for distributed decision calculation using overlays and a DHT-based distributed variant of Bloom filters},
   2711         www_section = {Bloom filter, overlay networks, pocket switched network},
   2712         isbn = {978-1-60558-751-6},
   2713         doi = {10.1145/1658997.1659021},
   2714         url = {http://portal.acm.org/citation.cfm?doid=1658997.1659021$\#$},
   2715         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/conext09-phdworkshop-cameraready.pdf},
   2716         author = {Mayer, Christoph P.}
   2717 }
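%
% Editor's annotation (illustrative, not from the paper): the entry above
% builds routing state for pocket switched networks on Bloom filters. A
% minimal, commented-out sketch of a plain Bloom filter (not the DHT-based
% distributed variant the abstract proposes):
%
%   import hashlib
%
%   class BloomFilter:
%       """k hash positions over an m-slot array; false positives possible,
%       false negatives impossible."""
%       def __init__(self, m=1024, k=4):
%           self.m, self.k = m, k
%           self.bits = bytearray(m)  # one byte per bit, for simplicity
%
%       def _positions(self, item):
%           for i in range(self.k):
%               digest = hashlib.sha256(f"{i}:{item}".encode()).digest()
%               yield int.from_bytes(digest[:8], "big") % self.m
%
%       def add(self, item):
%           for p in self._positions(item):
%               self.bits[p] = 1
%
%       def __contains__(self, item):
%           return all(self.bits[p] for p in self._positions(item))
%
%   bf = BloomFilter()
%   bf.add("node-42")
%   assert "node-42" in bf
%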
   2718 @conference {Knoll:2009:BPS:1590968.1591829,
   2719         title = {Bootstrapping Peer-to-Peer Systems Using IRC},
   2720         booktitle = {WETICE'09--Proceedings of the 18th IEEE International Workshops on Enabling Technologies: Infrastructures for Collaborative Enterprises},
   2721         series = {WETICE '09},
   2722         year = {2009},
   2723         month = jun,
   2724         pages = {122--127},
   2725         publisher = {IEEE Computer Society},
   2726         organization = {IEEE Computer Society},
   2727         address = {Groningen, The Netherlands},
   2728         abstract = {Research in the area of peer-to-peer systems is mainly focused on structuring the overlay network. Little attention is paid to the process of setting up and joining a peer-to-peer overlay network, i.e. the bootstrapping of peer-to-peer networks. The major challenge is to get hold of one peer that is already in the overlay. Otherwise, the first peer must be able to detect that the overlay is currently empty. Successful P2P applications either provide a centralized server for this task (Skype) or they simply put the burden on the user (eMule). We propose an automatic solution which does not require any user intervention and does not exhibit a single point of failure. Such decentralized bootstrapping protocols are especially important for open non-commercial peer-to-peer systems which cannot provide a server infrastructure for bootstrapping. The algorithm we are proposing builds on the Internet Relay Chat (IRC), a highly available, open, and distributed network of chat servers. Our algorithm is designed to put only a very minimal load on the IRC servers. In measurements we show that our bootstrapping protocol scales very well, handles flash crowds, and puts only a constant load on the IRC system regardless of the peer-to-peer overlay size},
   2729         www_section = {automated, bootstrapping, decentralized, efficient, IRC, P2P, peer-to-peer networking},
   2730         isbn = {978-0-7695-3683-5},
   2731         doi = {http://dx.doi.org/10.1109/WETICE.2009.40},
   2732         url = {http://dx.doi.org/10.1109/WETICE.2009.40},
   2733         author = {Knoll, Mirko and Helling, Matthias and Arno Wacker and Holzapfel, Sebastian and Weis, Torben}
   2734 }
   2735 @article {2009_0,
   2736         title = {Brahms: Byzantine Resilient Random Membership Sampling},
   2737         journal = {Computer Networks Journal (COMNET), Special Issue on Gossiping in Distributed Systems},
   2738         year = {2009},
   2739         month = {April},
   2740         www_section = {Byzantine Resilient Sampling, Random Membership, random sampling},
   2741         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brahms-Comnet-Mar09.pdf , https://git.gnunet.org/bibliography.git/plain/docs/Brahms-rps-mar09.pdf},
   2742         url = {https://bibliography.gnunet.org},
   2743         author = {Edward Bortnikov and Maxim Gurevich and Idit Keidar and Gabriel Kliot and Alexander Shraer}
   2744 }
   2745 @conference {DBLP:conf/sss/Kermarrec09,
   2746         title = {Challenges in Personalizing and Decentralizing the Web: An Overview of GOSSPLE},
   2747         booktitle = {Challenges in Personalizing and Decentralizing the Web: An Overview of GOSSPLE},
   2748         year = {2009},
   2749         pages = {1--16},
   2750         url = {https://bibliography.gnunet.org},
   2751         www_section = unsorted,
   2752         author = {Anne-Marie Kermarrec}
   2753 }
   2754 @conference {2009_1,
   2755         title = {CLIO/UNISONO: practical distributed and overlay-wide network measurement},
   2756         booktitle = {CLIO/UNISONO: practical distributed and overlay-wide network measurement},
   2757         year = {2009},
   2758         abstract = {Building on previous work, we present an early version of our CLIO/UNISONO framework for distributed network measurements. CLIO/UNISONO is a generic measurement framework specifically aimed at overlays that need measurements for optimization purposes. In this talk, we briefly introduce the most important concepts and then focus on some more advanced mechanisms like measurements across connectivity domains and remote orders},
   2759         url = {https://bibliography.gnunet.org},
   2760         www_section = unsorted,
   2761         author = {Ralph Holz and Dirk Haage}
   2762 }
   2763 @conference {2009_2,
   2764         title = {A Collusion-Resistant Distributed Scalar Product Protocol with Application to Privacy-Preserving Computation of Trust},
   2765         booktitle = {Network Computing and Applications, 2009. NCA 2009. Eighth IEEE International Symposium on},
   2766         year = {2009},
   2767         month = {July},
   2768         abstract = {Private scalar product protocols have proved to be interesting in various applications such as data mining, data integration, trust computing, etc. In 2007, Yao et al. proposed a distributed scalar product protocol with application to privacy-preserving computation of trust [1]. This protocol is split into two phases: a homomorphic encryption computation; and a private multi-party summation protocol. The summation protocol has two drawbacks: first, it generates a non-negligible communication overhead; and second, it introduces a security flaw. The contribution of this present paper is two-fold. We first prove that the protocol of [1] is not secure in the semi-honest model by showing that it is not resistant to collusion attacks and we give an example of a collusion attack, with only four participants. Second, we propose to use a superposed sending round as an alternative to the multi-party summation protocol, which results in better security properties and in a reduction of the communication costs. In particular, regarding security, we show that the previous scheme was vulnerable to collusions of three users whereas in our proposal we can choose t in [1..n--1] and define a protocol resisting collusions of up to t users},
   2769         www_section = {collaboration, collusion-resistant distributed protocol, Computer applications, computer networks, cryptographic protocols, cryptography, data privacy, distributed computing, homomorphic encryption computation, Laboratories, Portable media players, privacy-preserving computation, Privacy-preserving computation of trust, private multiparty summation protocol, scalar product protocol, secure multi-party computation, Secure scalar product, security, Superposed sending, Telephony, trust computation},
   2770         doi = {10.1109/NCA.2009.48},
   2771         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CollusionResistant2009Melchor.pdf},
   2772         url = {https://bibliography.gnunet.org},
   2773         author = {Melchor, C.A. and Ait-Salem, B. and Gaborit, P.}
   2774 }
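%
% Editor's annotation (illustrative only, not taken from the paper): the
% abstract above replaces the multi-party summation phase by a superposed
% sending round. A commented-out toy version, assuming every pair of parties
% already shares a fresh random key; the pairwise pads cancel, so only the
% sum of the inputs becomes public.
%
%   import secrets
%   from itertools import combinations
%
%   P = 2**31 - 1  # public modulus
%
%   def superposed_round(values):
%       """Each party announces its value plus pads; the pads cancel in the sum."""
%       n = len(values)
%       pads = [[0] * n for _ in range(n)]
%       for i, j in combinations(range(n), 2):
%           k = secrets.randbelow(P)          # key shared by parties i and j
%           pads[i][j], pads[j][i] = k, P - k
%       announced = [(values[i] + sum(pads[i])) % P for i in range(n)]
%       return sum(announced) % P
%
%   assert superposed_round([3, 5, 7]) == 15
%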
   2775 @article {DBLP:journals/tdp/NojimaK09,
   2776         title = {Cryptographically secure Bloom-filters},
   2777         journal = {Transactions on Data Privacy},
   2778         volume = {2},
   2779         number = {2},
   2780         year = {2009},
   2781         pages = {131--139},
   2782         url = {https://bibliography.gnunet.org},
   2783         www_section = unsorted,
   2784         author = {Ryo Nojima and Youki Kadobayashi}
   2785 }
   2786 @conference {DBLP:conf/sp/NarayananS09,
   2787         title = {De-anonymizing Social Networks},
   2788         booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P 2009), 17-20 May, Oakland, California, USA},
   2789         year = {2009},
   2790         pages = {173--187},
   2791         publisher = {IEEE Computer Society},
   2792         organization = {IEEE Computer Society},
   2793         abstract = {Operators of online social networks are increasingly sharing potentially sensitive information about users and their relationships with advertisers, application developers, and data-mining researchers. Privacy is typically protected by anonymization, i.e., removing names, addresses, etc.
   2794 
   2795 We present a framework for analyzing privacy and anonymity in social networks and develop a new re-identification algorithm targeting anonymized social-network graphs. To demonstrate its effectiveness on real-world networks, we show that a third of the users who can be verified to have accounts on both Twitter, a popular microblogging service, and Flickr, an online photo-sharing site, can be re-identified in the anonymous Twitter graph with only a 12\% error rate.
   2796 
   2797 Our de-anonymization algorithm is based purely on the network topology, does not require creation of a large number of dummy "sybil" nodes, is robust to noise and all existing defenses, and works even when the overlap between the target network and the adversary's auxiliary information is small},
   2798         www_section = {anonymity, network topology, privacy},
   2799         isbn = {978-0-7695-3633-0},
   2800         url = {http://randomwalker.info/social-networks/},
   2801         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NarayananS09.pdf},
   2802         author = {Arvind Narayanan and Vitaly Shmatikov}
   2803 }
   2804 @article {Badishi:2009:DFC:1550962.1551186,
   2805         title = {Deleting files in the Celeste peer-to-peer storage system},
   2806         journal = {Journal of Parallel and Distributed Computing},
   2807         volume = {69},
   2808         year = {2009},
   2809         month = jul,
   2810         pages = {613--622},
   2811         publisher = {Academic Press, Inc},
   2812         address = {Orlando, FL, USA},
   2813         abstract = {Celeste is a robust peer-to-peer object store built on top of a distributed hash table (DHT). Celeste is a working system, developed by Sun Microsystems Laboratories. During the development of Celeste, we faced the challenge of complete object deletion, and moreover, of deleting ''files'' composed of several different objects. This important problem is not solved by merely deleting meta-data, as there are scenarios in which all file contents must be deleted, e.g., due to a court order. Complete file deletion in a realistic peer-to-peer storage system has not been previously dealt with due to the intricacy of the problem--the system may experience high churn rates, nodes may crash or have intermittent connectivity, and the overlay network may become partitioned at times. We present an algorithm that eventually deletes all file contents, data and meta-data, in the aforementioned complex scenarios. The algorithm is fully functional and has been successfully integrated into Celeste},
   2814         www_section = {Celeste, fault-tolerance, peer-to-peer networking, storage},
   2815         issn = {0743-7315},
   2816         doi = {10.1016/j.jpdc.2009.03.003},
   2817         url = {http://dl.acm.org/citation.cfm?id=1550962.1551186},
   2818         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Parallel\%20\%26\%20Distributed\%20Computing\%20-\%20Deleting\%20files\%20in\%20the\%20Celeste\%20p2p\%20storage\%20systems.pdf},
   2819         author = {Badishi, Gal and Caronni, Germano and Keidar, Idit and Rom, Raphael and Scott, Glenn}
   2820 }
   2821 @conference {2009_3,
   2822         title = {Differentially Private Recommender Systems: Building Privacy into the Netflix Prize Contenders},
   2823         booktitle = {Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
   2824         year = {2009},
   2825         publisher = {ACM},
   2826         organization = {ACM},
   2827         address = {New York, NY, USA},
   2828         abstract = {We consider the problem of producing recommendations from collective user behavior while simultaneously providing guarantees of privacy for these users. Specifically, we consider the Netflix Prize data set, and its leading algorithms, adapted to the framework of differential privacy.
   2829 
   2830 Unlike prior privacy work concerned with cryptographically securing the computation of recommendations, differential privacy constrains a computation in a way that precludes any inference about the underlying records from its output. Such algorithms necessarily introduce uncertainty--i.e., noise--to computations, trading accuracy for privacy.
   2831 
   2832 We find that several of the leading approaches in the Netflix Prize competition can be adapted to provide differential privacy, without significantly degrading their accuracy. To adapt these algorithms, we explicitly factor them into two parts, an aggregation/learning phase that can be performed with differential privacy guarantees, and an individual recommendation phase that uses the learned correlations and an individual's data to provide personalized recommendations. The adaptations are non-trivial, and involve both careful analysis of the per-record sensitivity of the algorithms to calibrate noise, as well as new post-processing steps to mitigate the impact of this noise.
   2833 
   2834 We measure the empirical trade-off between accuracy and privacy in these adaptations, and find that we can provide non-trivial formal privacy guarantees while still outperforming the Cinematch baseline Netflix provides},
   2835         www_section = {Differential Privacy, Netflix, recommender systems},
   2836         isbn = {978-1-60558-495-9},
   2837         doi = {10.1145/1557019.1557090},
   2838         url = {http://doi.acm.org/10.1145/1557019.1557090},
   2839         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecommender2009McSherry.pdf},
   2840         author = {McSherry, Frank and Mironov, Ilya}
   2841 }
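%
% Editor's annotation (not from the paper): the entry above (like the earlier
% record-matching entry) relies on adding noise calibrated to query
% sensitivity. A commented-out sketch of the standard Laplace mechanism, the
% simplest such primitive; the function names are illustrative.
%
%   import math
%   import random
%
%   def laplace_noise(scale):
%       """Draw Laplace(0, scale) noise via the inverse-CDF method."""
%       u = random.random() - 0.5
%       return -scale * math.copysign(1.0, u) * math.log(1.0 - 2.0 * abs(u))
%
%   def laplace_mechanism(true_value, sensitivity, epsilon):
%       """Release a differentially private value: truth plus calibrated noise."""
%       return true_value + laplace_noise(sensitivity / epsilon)
%
%   # e.g. a per-item rating count with sensitivity 1 and a privacy budget of 0.1
%   noisy_count = laplace_mechanism(true_value=1234, sensitivity=1.0, epsilon=0.1)
%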
   2842 @conference {2009_4,
   2843         title = {Enhancing Application-Layer Multicast Solutions by Wireless Underlay Support},
   2844         booktitle = {Kommunikation in Verteilten Systemen (KiVS) 2009, Kassel, Germany},
   2845         year = {2009},
   2846         abstract = {Application Layer Multicast (ALM) is an attractive solution to overcome the deployment problems of IP-Multicast. We show how to cope with the challenges of incorporating wireless devices into ALM protocols. As a first approach we extend the NICE protocol, significantly increasing its performance in scenarios with many devices connected through wireless LAN},
   2847         www_section = {multicast},
   2848         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.143.2935},
   2849         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nice-wli.pdf},
   2850         author = {H{\"u}bsch, Christian and Oliver Waldhorst}
   2851 }
   2852 @mastersthesis {2009_5,
   2853         title = {Evaluation of Current P2P-SIP Proposals with Respect to the Igor/SSR API},
   2854         volume = {Computer Science},
   2855         year = {2009},
   2856         school = {Technische Universit{\"a}t M{\"u}nchen},
   2857         type = {Diplomarbeit},
   2858         address = {Munich, Germany},
   2859         url = {https://bibliography.gnunet.org},
   2860         www_section = unsorted,
   2861         author = {Markus Bucher}
   2862 }
   2863 @conference {Cholez:2009:ESA:1574663.1574671,
   2864         title = {Evaluation of Sybil Attacks Protection Schemes in KAD},
   2865         booktitle = {AIMS'09--Proceedings of the 3rd International Conference on Autonomous Infrastructure, Management and Security: Scalability of Networks and Services},
   2866         series = {Lecture Notes in Computer Science},
   2867         volume = {5637},
   2868         year = {2009},
   2869         month = jun,
   2870         pages = {70--82},
   2871         publisher = {Springer-Verlag},
   2872         organization = {Springer-Verlag},
   2873         address = {Enschede, The Netherlands},
   2874         abstract = {In this paper, we assess the protection mechanisms entered into recent clients to fight against the Sybil attack in KAD, a widely deployed Distributed Hash Table. We study three main mechanisms: a protection against flooding through packet tracking, an IP address limitation and a verification of identities. We evaluate their efficiency by designing and adapting an attack for several KAD clients with different levels of protection. Our results show that the new security rules mitigate the Sybil attacks previously launched. However, we prove that it is still possible to control a small part of the network despite the new inserted defenses with a distributed eclipse attack and limited resources},
   2875         www_section = {defense, distributed hash table, KAD, p2p network, security, Sybil attack},
   2876         isbn = {978-3-642-02626-3},
   2877         doi = {http://dx.doi.org/10.1007/978-3-642-02627-0_6},
   2878         url = {http://dx.doi.org/10.1007/978-3-642-02627-0_6},
   2879         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AIMS\%2709\%20-\%20Sybil\%20attacks\%20protection\%20schemes\%20in\%20KAD.pdf},
   2880         author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}
   2881 }
   2882 @book {Bogetoft:2009:SMC:1601990.1602018,
   2883         title = {Financial Cryptography and Data Security},
   2884         booktitle = {Financial Cryptography and Data Security},
   2885         series = {Lecture Notes in Computer Science},
   2886         volume = {6052},
   2887         year = {2009},
   2888         pages = {325--343},
   2889         publisher = {Springer-Verlag},
   2890         organization = {Springer-Verlag},
   2891         edition = {1st},
   2892         chapter = {Secure Multiparty Computation Goes Live},
   2893         address = {Berlin, Heidelberg},
   2894         abstract = {This book constitutes the thoroughly refereed post-conference proceedings of the 14th International Conference on Financial Cryptography and Data Security, FC 2010, held in Tenerife, Canary Islands, Spain in January 2010. The 19 revised full papers and 15 revised short papers presented together with 1 panel report and 7 poster papers were carefully reviewed and selected from 130 submissions. The papers cover all aspects of securing transactions and systems and feature current research focusing on both fundamental and applied real-world deployments on all aspects surrounding commerce security},
   2895         www_section = {anonymous credentials, bilinear group, privacy, secret sharing, SMC, symbolic evaluation},
   2896         isbn = {978-3-642-03548-7},
   2897         doi = {10.1007/978-3-642-03549-4_20},
   2898         url = {http://dx.doi.org/10.1007/978-3-642-03549-4_20},
   2899         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Bogetoft\%20et\%20al.\%20-\%20Secure\%20multiparty\%20computation\%20goes\%20live.pdf},
   2900         author = {Bogetoft, Peter and Christensen, Dan Lund and Damg{\'a}rd, Ivan and Geisler, Martin and Jakobsen, Thomas and Kr{\o}igaard, Mikkel and Nielsen, Janus Dam and Nielsen, Jesper Buus and Nielsen, Kurt and Pagter, Jakob and Schwartzbach, Michael and Toft, Tomas},
   2901         editor = {Roger Dingledine and Philippe Golle}
   2902 }
   2903 @conference {wpes09-dht-attack,
   2904         title = {Hashing it out in public: Common failure modes of DHT-based anonymity schemes},
   2905         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2009)},
   2906         year = {2009},
   2907         month = {November},
   2908         publisher = {ACM},
   2909         organization = {ACM},
   2910         abstract = {We examine peer-to-peer anonymous communication systems that use Distributed Hash Table algorithms for relay selection. We show that common design flaws in these schemes lead to highly effective attacks against the anonymity provided by the schemes. These attacks stem from attacks on DHT routing, and are not mitigated by the well-known DHT security mechanisms due to a fundamental mismatch between the security requirements of DHT routing's put/get functionality and anonymous routing's relay selection functionality. Our attacks essentially allow an adversary that controls only a small fraction of the relays to function as a global active adversary. We apply these attacks in more detail to two schemes: Salsa and Cashmere. In the case of Salsa, we show that an attacker that controls 10\% of the relays in a network of size 10,000 can compromise more than 80\% of all completed circuits; and in the case of Cashmere, we show that an attacker that controls 20\% of the relays in a network of size 64000 can compromise 42\% of the circuits},
   2911         www_section = {anonymity, denial-of-service, P2P},
   2912         isbn = {978-1-60558-783-7},
   2913         doi = {10.1145/1655188.1655199},
   2914         url = {http://portal.acm.org/citation.cfm?id=1655188.1655199},
   2915         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-dht-attack.pdf},
   2916         author = {Andrew Tran and Nicholas J. Hopper and Yongdae Kim}
   2917 }
   2918 @conference {1656984,
   2919         title = {Heterogeneous gossip},
   2920         booktitle = {Middleware '09: Proceedings of the 10th ACM/IFIP/USENIX International Conference on Middleware},
   2921         year = {2009},
   2922         pages = {1--20},
   2923         publisher = {Springer-Verlag New York, Inc},
   2924         organization = {Springer-Verlag New York, Inc},
   2925         address = {New York, NY, USA},
   2926         abstract = {Gossip-based information dissemination protocols are considered easy to deploy, scalable and resilient to network dynamics. Load-balancing is inherent in these protocols as the dissemination work is evenly spread among all nodes. Yet, large-scale distributed systems are usually heterogeneous with respect to network capabilities such as bandwidth. In practice, a blind load-balancing strategy might significantly hamper the performance of the gossip dissemination.
   2927 
   2928 This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynamically adapt their contribution to the gossip dissemination according to their bandwidth capabilities. Using a continuous, itself gossip-based, approximation of relative bandwidth capabilities, HEAP dynamically leverages the most capable nodes by increasing their fanout, while decreasing by the same proportion that of less capable nodes. HEAP preserves the simple and proactive (churn adaptation) nature of gossip, while significantly improving its effectiveness. We extensively evaluate HEAP in the context of a video streaming application on a testbed of 270 PlanetLab nodes. Our results show that HEAP significantly improves the quality of the streaming over standard homogeneous gossip protocols, especially when the stream rate is close to the average available bandwidth},
   2929         www_section = {heterogeneity, load balancing},
   2930         url = {http://portal.acm.org/citation.cfm?id=1656984$\#$},
   2931         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/middleware-monod.pdf},
   2932         author = {Frey, Davide and Rachid Guerraoui and Anne-Marie Kermarrec and Boris Koldehofe and Mogensen, Martin and Monod, Maxime and Qu{\'e}ma, Vivien}
   2933 }
   2934 @article {EURECOM+2885,
   2935         title = {Long term study of peer behavior in the KAD DHT},
   2936         journal = {IEEE/ACM Transactions on Networking},
   2937         volume = {17},
   2938         year = {2009},
   2939         month = may,
   2940         chapter = {1371},
   2941         abstract = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling a representative subset of KAD every five minutes for six months and obtained information about geographical distribution of peers, session times, daily usage, and peer lifetime. We have found that session times are Weibull distributed and we show how this information can be exploited to make the publishing mechanism much more efficient. Peers are identified by the so-called KAD ID, which up to now was assumed to be persistent. However, we observed that a fraction of peers changes their KAD ID as frequently as once a session. This change of KAD IDs makes it difficult to characterize end-user behavior. For this reason we have been crawling the entire KAD network once a day for more than a year to track end-users with static IP addresses, which allows us to estimate end-user lifetime and the fraction of end-users changing their KAD ID},
   2942         www_section = {churn, distributed hash table, KAD, Kademlia},
   2943         issn = {1063-6692},
   2944         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Long\%20Term\%20Study\%20of\%20Peer\%20Behavior\%20in\%20the\%20kad\%20DHT.pdf},
   2945         url = {https://bibliography.gnunet.org},
   2946         author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack}
   2947 }
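%
% Editor's annotation (illustrative only): the study above reports that KAD
% session times are Weibull distributed. A commented-out sketch of how such a
% claim could be checked on crawl data, assuming scipy is available; the
% synthetic sample below merely stands in for measured session durations.
%
%   import numpy as np
%   from scipy import stats
%
%   # Stand-in for measured session durations in minutes.
%   rng = np.random.default_rng(1)
%   sessions = stats.weibull_min.rvs(0.6, scale=120, size=5000, random_state=rng)
%
%   # Fit a two-parameter Weibull (location fixed at 0).
%   shape, _, scale = stats.weibull_min.fit(sessions, floc=0)
%   print(f"Weibull shape={shape:.2f}, scale={scale:.1f} minutes")
%
%   # Kolmogorov-Smirnov test as a rough goodness-of-fit check.
%   ks_stat, p_value = stats.kstest(sessions, "weibull_min", args=(shape, 0, scale))
%   print(f"KS statistic={ks_stat:.3f}, p={p_value:.3f}")
%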
   2948 @conference {1551621,
   2949         title = {Maintaining reference graphs of globally accessible objects in fully decentralized distributed systems},
   2950         booktitle = {HPDC '09: Proceedings of the 18th ACM international symposium on High performance distributed computing},
   2951         year = {2009},
   2952         pages = {59--60},
   2953         publisher = {ACM},
   2954         organization = {ACM},
   2955         address = {New York, NY, USA},
   2956         abstract = {Since the advent of electronic computing, the processors' clock speed has risen tremendously. Now that energy efficiency requirements have stopped that trend, the number of processing cores per machine started to rise. In near future, these cores will become more specialized, and their inter-connections will form complex networks, both on-chip and beyond. This trend opens new fields of applications for high performance computing: Heterogeneous architectures offer different functionalities and thus support a wider range of applications. The increased compute power of these systems allows more complex simulations and numerical computations. Falling costs enable even small companies to invest in multi-core systems and clusters. However, the growing complexity might impede this growth. Imagine a cluster of thousands of interconnected heterogeneous processor cores. A software developer will need a deep knowledge about the underlying infrastructure as well as the data and communication dependencies in her application to partition it optimally across the available cores. Moreover, a predetermined partitioning scheme cannot reflect failing processors or additionally provided resources. In our poster, we introduce J-Cell, a project that aims at simplifying high performance distributed computing. J-Cell offers a single system image, which allows applications to run transparently on heterogeneous multi-core machines. It distributes code, objects and threads onto the compute resources which may be added or removed at run-time. This dynamic property leads to an ad-hoc network of processors and cores. In this network, a fully decentralized object localization and retrieval algorithm guarantees the access to distributed shared objects},
   2957         www_section = {globally accessible objects, single system image},
   2958         isbn = {978-1-60558-587-1},
   2959         doi = {10.1145/1551609.1551621},
   2960         url = {http://portal.acm.org/citation.cfm?id=1551609.1551621$\#$},
   2961         author = {Bjoern Saballus and Thomas Fuhrmann}
   2962 }
   2963 @conference { moscibroda:on,
   2964         title = {On Mechanism Design without Payments for Throughput Maximization},
   2965         booktitle = {INFOCOM'09. Proceedings of the 28th IEEE International Conference on Computer Communications},
   2966         year = {2009},
   2967         month = apr,
   2968         pages = {972--980},
   2969         publisher = {IEEE Computer Society},
   2970         organization = {IEEE Computer Society},
   2971         address = {Rio de Janeiro, Brazil},
   2972         abstract = {It is well-known that the overall efficiency of a distributed system can suffer if the participating entities seek to maximize their individual performance. Consequently, mechanisms have been designed that force the participants to behave more cooperatively. Most of these game-theoretic solutions rely on payments between participants. Unfortunately, such payments are often cumbersome to implement in practice, especially in dynamic networks and where transaction costs are high. In this paper, we investigate the potential of mechanisms which work without payments. We consider the problem of throughput maximization in multi-channel environments and shed light onto the throughput increase that can be achieved with and without payments. We introduce and analyze two different concepts: the worst-case leverage where we assume that players end up in the worst rational strategy profile, and the average-case leverage where players select a random non-dominated strategy. Our theoretical insights are complemented by simulations},
   2973         www_section = {distributed systems, game-theoretic, individual performance, mechanism design, payment, throughput maximization},
   2974         isbn = {978-1-4244-3512-8 },
   2975         doi = {http://dx.doi.org/10.1109/INFCOM.2009.5062008},
   2976         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOMM\%2709\%20-\%20Mechanism\%20design\%20without\%20payments.pdf},
   2977         author = {Thomas Moscibroda and Stefan Schmid}
   2978 }
   2979 @conference {DBLP:conf/ccs/VassermanJTHK09,
   2980         title = {Membership-concealing overlay networks},
   2981         booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009},
   2982         year = {2009},
   2983         pages = {390--399},
   2984         publisher = {ACM},
   2985         organization = {ACM},
   2986         www_section = {membership concealment, P2P, privacy},
   2987         isbn = {978-1-60558-894-0},
   2988         doi = {10.1145/1653662.1653709},
   2989         url = {http://portal.acm.org/citation.cfm?id=1653662.1653709},
   2990         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/VassermanJTHK09.pdf},
   2991         author = {Eugene Y. Vasserman and Rob Jansen and James Tyra and Nicholas J. Hopper and Yongdae Kim},
   2992         editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}
   2993 }
   2994 @mastersthesis {2009_6,
   2995         title = {Monte-Carlo Search Techniques in the Modern Board Game Thurn and Taxis},
   2996         volume = {Master Science of Artificial Intelligence},
   2997         year = {2009},
   2998         month = dec,
   2999         pages = {0--93},
   3000         school = {Maastricht University},
   3001         type = {Master Thesis},
   3002         address = {Maastricht, Netherlands},
   3003         abstract = {Modern board games present a new and challenging field when researching
   3004 search techniques in the field of Artificial Intelligence. These games differ from
   3005 classic board games, such as chess, in that they can be non-deterministic, have imperfect information or more than two players. While tree-search approaches, such as alpha-beta pruning, have been quite successful in playing classic board games, by, for instance, defeating the then reigning world champion Garry Kasparov in chess, these techniques are not as effective when applied to modern board games.
   3006 This thesis investigates the effectiveness of Monte-Carlo Tree Search when applied to a modern board game, for which the board game Thurn and Taxis was used. This is a non-deterministic modern board game with imperfect information that can be played with more than 2 players, and is hence suitable for research. First, the state-space and game-tree complexities of this game are computed, from which the conclusion can be drawn that the two-player version of the game has a complexity similar to the game Shogi. Several techniques are investigated in order to improve the sampling process, for instance by adding domain knowledge.
   3007 Given the results of the experiments, one can conclude that Monte-Carlo Tree
   3008 Search gives a slight performance increase over standard Monte-Carlo search.
   3009 In addition, the most effective improvements appeared to be the application of
   3010 pseudo-random simulations and limiting simulation lengths, while other techniques have been shown to be less effective or even ineffective. Overall, when applying the best performing techniques, an AI with advanced playing strength has been created, such that further research is likely to push this performance to a strength of expert level},
   3011         www_section = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search, search techniques},
   3012         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20F.Schadd.pdf},
   3013         url = {https://bibliography.gnunet.org},
   3014         author = {Frederik Christiaan Schadd}
   3015 }
   3016 @book {2009_7,
   3017         title = {Multi Party Distributed Private Matching, Set Disjointness and Cardinality of Set Intersection with Information Theoretic Security},
   3018         booktitle = {Cryptology and Network Security},
   3019         series = {Lecture Notes in Computer Science},
   3020         volume = {5888},
   3021         year = {2009},
   3022         pages = {21--40},
   3023         publisher = {Springer Berlin Heidelberg},
   3024         organization = {Springer Berlin Heidelberg},
   3025         abstract = {In this paper, we focus on the specific problems of Private Matching, Set Disjointness and Cardinality of Set Intersection in information theoretic settings. Specifically, we give perfectly secure protocols for the above problems in n party settings, tolerating a computationally unbounded semi-honest adversary, who can passively corrupt at most t < n/2 parties. To the best of our knowledge, these are the first such information theoretically secure protocols in a multi-party setting for all the three problems. Previous solutions for Distributed Private Matching and Cardinality of Set Intersection were cryptographically secure and the previous Set Disjointness solution, though information theoretically secure, is in a two party setting. We also propose a new model for Distributed Private matching which is relevant in a multi-party setting},
   3026         www_section = {Multiparty Computation, Privacy preserving Set operations},
   3027         isbn = {978-3-642-10432-9},
   3028         doi = {10.1007/978-3-642-10433-6_2},
   3029         url = {http://dx.doi.org/10.1007/978-3-642-10433-6_2},
   3030         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiParty2009Narayanan.pdf},
   3031         author = {Sathya Narayanan, G. and Aishwarya, T. and Agrawal, Anugrah and Patra, Arpita and Choudhary, Ashish and Pandu Rangan, C.},
   3032         editor = {Garay, Juan A. and Miyaji, Atsuko and Otsuka, Akira}
   3033 }
   3034 @article {nymble-tdsc,
   3035         title = {Nymble: Blocking Misbehaving Users in Anonymizing Networks},
   3036         journal = {IEEE Transactions on Dependable and Secure Computing (TDSC)},
   3037         year = {2009},
   3038         month = {September},
   3039         abstract = {Anonymizing networks such as Tor allow users to access Internet services privately by using a series of routers to hide the client's IP address from the server. The success of such networks, however, has been limited by users employing this anonymity for abusive purposes such as defacing popular websites. Website administrators routinely rely on IP-address blocking for disabling access to misbehaving users, but blocking IP addresses is not practical if the abuser routes through an anonymizing network. As a result, administrators block {\em all} known exit nodes of anonymizing networks, denying anonymous access to honest and dishonest users alike. To address this problem, we present Nymble, a system in which \emph{servers can blacklist misbehaving users without compromising their anonymity}. Our system is thus agnostic to different servers' definitions of misbehavior {\textemdash} servers can block users for whatever reason, and the privacy of blacklisted users is maintained},
   3040         www_section = {authentication, privacy},
   3041         issn = {1545-5971},
   3042         doi = {10.1109/TDSC.2009.38},
   3043         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/TDSC.2009.38},
   3044         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nymble-tdsc.pdf},
   3045         author = {Patrick P. Tsang and Apu Kapadia and Cory Cornelius and Sean Smith}
   3046 }
   3047 @book {2009_8,
   3048         title = {An Optimally Fair Coin Toss},
   3049         booktitle = {Theory of Cryptography},
   3050         series = {Lecture Notes in Computer Science},
   3051         volume = {5444},
   3052         year = {2009},
   3053         pages = {1--18},
   3054         publisher = {Springer Berlin Heidelberg},
   3055         organization = {Springer Berlin Heidelberg},
   3056         abstract = {We address one of the foundational problems in cryptography: the bias of coin-flipping protocols. Coin-flipping protocols allow mutually distrustful parties to generate a common unbiased random bit, guaranteeing that even if one of the parties is malicious, it cannot significantly bias the output of the honest party. A classical result by Cleve [STOC '86] showed that for any two-party r-round coin-flipping protocol there exists an efficient adversary that can bias the output of the honest party by Ω(1/r). However, the best previously known protocol only guarantees O(1/√r) bias, and the question of whether Cleve's bound is tight has remained open for more than twenty years.
   3057 In this paper we establish the optimal trade-off between the round complexity and the bias of two-party coin-flipping protocols. Under standard assumptions (the existence of oblivious transfer), we show that Cleve's lower bound is tight: we construct an r-round protocol with bias O(1/r)},
   3058         isbn = {978-3-642-00456-8},
   3059         doi = {10.1007/978-3-642-00457-5_1},
   3060         url = {http://dx.doi.org/10.1007/978-3-642-00457-5_1},
   3061         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OptimallyFairCoinToss2009Moran.pdf},
   3062         author = {Moran, Tal and Naor, Moni and Segev, Gil},
   3063         editor = {Reingold, Omer}
   3064 }
   3065 @conference {2009_9,
   3066         title = {Optimization of distributed services with UNISONO},
   3067         booktitle = {GI/ITG KuVS Fachgespr{\"a}ch NGN Service Delivery Platforms \& Service Overlay Networks},
   3068         year = {2009},
   3069         abstract = {Distributed services are a special case of P2P networks where nodes have several distinctive tasks. Based on previous work, we show how UNISONO provides a way to optimize these services to increase performance, efficiency and user experience. UNISONO is a generic framework for host-based distributed network measurements. In this talk, we present UNISONO as an enabler for self-organizing Service Delivery Platforms. We give a short overview of the UNISONO concept and show how distributed services benefit from its usage},
   3070         www_section = {distributed systems, P2P},
   3071         author = {unknown},
   3072         url = {http://www.net.in.tum.de/de/mitarbeiter/holz/},
   3073         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/unisono_kuvs-ngn.pdf}
   3074 }
   3075 @conference {2009_10,
   3076         title = {Peer Profiling and Selection in the I2P Anonymous Network},
   3077         booktitle = {PET-CON 2009.1},
   3078         year = {2009},
   3079         month = mar,
   3080         address = {TU Dresden, Germany },
   3081         www_section = {I2P},
   3082         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/I2P-PET-CON-2009.1.pdf},
   3083         url = {https://bibliography.gnunet.org},
   3084         author = {Lars Schimmer}
   3085 }
   3086 @conference {p2p09-peersim,
   3087         title = {PeerSim: A Scalable P2P Simulator},
   3088         booktitle = {P2P'09--Proceedings of the 9th International Conference on Peer-to-Peer},
   3089         year = {2009},
   3090         month = sep,
   3091         pages = {99--100},
   3092         address = {Seattle, WA},
   3093         abstract = {The key features of peer-to-peer (P2P) systems are scalability and dynamism. The evaluation of a P2P protocol in realistic environments is very expensive and difficult to reproduce, so simulation is crucial in P2P research. PeerSim is an extremely scalable simulation environment that supports dynamic scenarios such as churn and other failure models. Protocols need to be specifically implemented for the PeerSim Java API, but with a reasonable effort they can be evolved into a real implementation. Testing in specified parameter-spaces is supported as well. PeerSim started out as a tool for our own research},
   3094         www_section = {P2P, peer-to-peer networking, PeerSim, simulation},
   3095         isbn = {978-1-4244-5066-4 },
   3096         doi = {http://dx.doi.org/10.1109/P2P.2009.5284506},
   3097         url = {http://peersim.sourceforge.net/},
   3098         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2709\%20-\%20PeerSim.pdf},
   3099         author = {Alberto Montresor and M{\'a}rk Jelasity and Gian Paolo Jesi and Spyros Voulgaris}
   3100 }
   3101 @conference {Plank:2009:PEE:1525908.1525927,
   3102         title = {A performance evaluation and examination of open-source erasure coding libraries for storage},
   3103         booktitle = {FAST'09--Proceedings of the 7th Conference on File and Storage Technologies},
   3104         year = {2009},
   3105         month = feb,
   3106         pages = {253--265},
   3107         publisher = {USENIX Association},
   3108         organization = {USENIX Association},
   3109         address = {San Francisco, CA, USA},
   3110         abstract = {Over the past five years, large-scale storage installations have required fault-protection beyond RAID-5, leading to a flurry of research on and development of erasure codes for multiple disk failures. Numerous open-source implementations of various coding techniques are available to the general public. In this paper, we perform a head-to-head comparison of these implementations in encoding and decoding scenarios. Our goals are to compare codes and implementations, to discern whether theory matches practice, and to demonstrate how parameter selection, especially as it concerns memory, has a significant impact on a code's performance. Additional benefits are to give storage system designers an idea of what to expect in terms of coding performance when designing their storage systems, and to identify the places where further erasure coding research can have the most impact},
   3111         www_section = {erasure coding, libraries, open-source, storage},
   3112         url = {http://www.usenix.org/event/fast09/tech/full_papers/plank/plank_html/},
   3113         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FAST\%2709\%20-\%20Open-source\%20erasure\%20coding\%20libraries\%20for\%20storage.pdf},
   3114         author = {James S. Plank and Luo, Jianqiang and Schuman, Catherine D. and Lihao Xu and Wilcox-O'Hearn, Zooko}
   3115 }
   3116 @conference {5328076,
   3117         title = {Performance Evaluation of On-Demand Multipath Distance Vector Routing Protocol under Different Traffic Models},
   3118         booktitle = {International Conference on Advances in Recent Technologies in Communication and Computing, 2009. ARTCom '09},
   3119         year = {2009},
   3120         month = oct,
   3121         pages = {77--80},
   3122         abstract = {Traffic models are the heart of any performance evaluation of telecommunication networks. Understanding the nature of traffic in high-speed, high-bandwidth communication systems is essential for effective operation and performance evaluation of the networks. Many routing protocols reported in the literature for mobile ad hoc networks (MANETs) have been primarily designed and analyzed under the assumption of CBR traffic models, which are unable to capture the statistical characteristics of the actual traffic. It is necessary to evaluate the performance properties of MANETs in the context of more realistic traffic models. In an effort towards this end, this paper evaluates the performance of the ad hoc on-demand multipath distance vector (AOMDV) routing protocol in the presence of Poisson and bursty self-similar traffic and compares them with that of CBR traffic. Different metrics are considered in analyzing the performance of the routing protocol, including packet delivery ratio, throughput and end-to-end delay. Our simulation results indicate that the packet delivery fraction and throughput in AOMDV are increased in the presence of self-similar traffic compared to other traffic. Moreover, it is observed that the end-to-end delay in the presence of self-similar traffic is less than that of CBR and higher than that of Poisson traffic},
   3123         www_section = {ad-hoc networks, AOMDV, distance vector, multi-path, performance},
   3124         doi = {10.1109/ARTCom.2009.31},
   3125         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/multipath-dv-perf.pdf},
   3126         author = {Malarkodi, B. and Rakesh, P. and Venkataramani, B.}
   3127 }
   3128 @conference { evans2009tor,
   3129         title = {A Practical Congestion Attack on Tor Using Long Paths},
   3130         booktitle = {18th USENIX Security Symposium},
   3131         year = {2009},
   3132         pages = {33--50},
   3133         publisher = {USENIX},
   3134         organization = {USENIX},
   3135         abstract = {In 2005, Murdoch and Danezis demonstrated the first practical congestion attack against a deployed anonymity network. They could identify which relays were on a target Tor user's path by building paths one at a time through every Tor relay and introducing congestion. However, the original attack was performed on only 13 Tor relays on the nascent and lightly loaded Tor network.
   3136 
   3137 We show that the attack from their paper is no longer practical on today's 1500-relay heavily loaded Tor network. The attack doesn't scale because a) the attacker needs a tremendous amount of bandwidth to measure enough relays during the attack window, and b) there are too many
   3138 false positives now that many other users are adding congestion at the same time as the attacks.
   3139 
   3140 We then strengthen the original congestion attack by combining it with a novel bandwidth amplification attack based on a flaw in the Tor design that lets us build long circuits that loop back on themselves. We show that this new combination attack is practical and effective by demonstrating a working attack on today's deployed Tor network. By coming up with a model to better understand Tor's routing behavior under congestion, we further provide a statistical analysis characterizing how effective our attack is in each case},
   3141         www_section = {anonymity, attack, denial-of-service, installation, Tor},
   3142         url = {http://grothoff.org/christian/tor.pdf},
   3143         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tor.pdf},
   3144         author = {Nathan S Evans and Roger Dingledine and Christian Grothoff}
   3145 }
   3146 @conference {Duminuco:2009:PSR:1584339.1584602,
   3147         title = {A Practical Study of Regenerating Codes for Peer-to-Peer Backup Systems},
   3148         booktitle = {ICDCS'09--Proceedings of the 29th IEEE International Conference on Distributed Computing Systems},
   3149         series = {ICDCS '09},
   3150         year = {2009},
   3151         month = jun,
   3152         pages = {376--384},
   3153         publisher = {IEEE Computer Society},
   3154         organization = {IEEE Computer Society},
   3155         address = {Montreal, Qu{\'e}bec, Canada},
   3156         abstract = {In distributed storage systems, erasure codes represent an attractive solution to add redundancy to stored data while limiting the storage overhead. They are able to provide the same reliability as replication requiring much less storage space. Erasure coding breaks the data into pieces that are encoded and then stored on different nodes. However, when storage nodes permanently abandon the system, new redundant pieces must be created. For erasure codes, generating a new piece requires the transmission of k pieces over the network, resulting in a k times higher reconstruction traffic as compared to replication. Dimakis proposed a new class of codes, called Regenerating Codes, which are able to provide both the storage efficiency of erasure codes and the communication efficiency of replication. However, Dimakis gave only a theoretical description of the codes without discussing implementation issues or computational costs. We have done a real implementation of Random Linear Regenerating Codes that allows us to measure their computational cost, which can be significant if the parameters are not chosen properly. However, we also find that there exist parameter values that result in a significant reduction of the communication overhead at the expense of a small increase in storage cost and computation, which makes these codes very attractive for distributed storage systems},
   3157         www_section = {Backup Systems, erasure codes, evaluation, peer-to-peer networking, Regenerating Codes, storage},
   3158         isbn = {978-0-7695-3659-0},
   3159         doi = {http://dx.doi.org/10.1109/ICDCS.2009.14},
   3160         url = {http://dx.doi.org/10.1109/ICDCS.2009.14},
   3161         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2709\%20-\%20Regenerating\%20codes\%20for\%20p2p\%20backup\%20systems.pdf},
   3162         author = {Alessandro Duminuco and E W Biersack}
   3163 }
   3164 @conference {2009_11,
   3165         title = {Privacy Integrated Queries: An Extensible Platform for Privacy-preserving Data Analysis},
   3166         booktitle = {Proceedings of the 2009 ACM SIGMOD International Conference on Management of Data},
   3167         year = {2009},
   3168         publisher = {ACM},
   3169         organization = {ACM},
   3170         address = {New York, NY, USA},
   3171         abstract = {We report on the design and implementation of the Privacy Integrated Queries (PINQ) platform for privacy-preserving data analysis. PINQ provides analysts with a programming interface to unscrubbed data through a SQL-like language. At the same time, the design of PINQ's analysis language and its careful implementation provide formal guarantees of differential privacy for any and all uses of the platform. PINQ's unconditional structural guarantees require no trust placed in the expertise or diligence of the analysts, substantially broadening the scope for design and deployment of privacy-preserving data analysis, especially by non-experts},
   3172         www_section = {anonymization, confidentiality, Differential Privacy, linq},
   3173         isbn = {978-1-60558-551-2},
   3174         doi = {10.1145/1559845.1559850},
   3175         url = {http://doi.acm.org.eaccess.ub.tum.de/10.1145/1559845.1559850},
   3176         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyIntergratedQueries2009McSherry.pdf},
   3177         author = {McSherry, Frank D.}
   3178 }
   3179 @conference {Tariqetal:2009:ProbLatencyBounds,
   3180         title = {Providing Probabilistic Latency Bounds for Dynamic Publish/Subscribe Systems},
   3181         booktitle = {Kommunikation in Verteilten Systemen (KiVS)},
   3182         year = {2009},
   3183         pages = {155--166},
   3184         publisher = {Gesellschaft f{\"u}r Informatik (GI)},
   3185         organization = {Gesellschaft f{\"u}r Informatik (GI)},
   3186         abstract = {In the context of large decentralized many-to-many communication systems it is impractical to provide realistic and hard bounds for certain QoS metrics including latency bounds. Nevertheless, many applications can yield better performance if such bounds hold with a given probability. In this paper we show how probabilistic latency bounds can be applied in the context of publish/subscribe. We present an algorithm for maintaining individual probabilistic latency bounds in a highly dynamic environment for a large number of subscribers. The algorithm consists of an adaptive dissemination algorithm as well as a cluster partitioning scheme. Together they ensure i) adaptation to the individual latency requirements of subscribers under dynamically changing system properties, and ii) scalability by determining appropriate clusters according to available publishers in the system},
   3187         www_section = {publish/subscribe, QoS},
   3188         isbn = {978-3-540-92666-5},
   3189         doi = {10.1007/978-3-540-92666-5},
   3190         url = {http://www.springerlink.com/content/x36578745jv7wr88/},
   3191         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/QoS_pubsub.pdf},
   3192         author = {Tariq, Muhammad Adnan and Boris Koldehofe and Gerald G. Koch and Kurt Rothermel}
   3193 }
   3194 @conference {conf/infocom/WuLR09,
   3195         title = {Queuing Network Models for Multi-Channel P2P Live Streaming Systems},
   3196         booktitle = {INFOCOM'09. Proceedings of the 28th IEEE International Conference on Computer Communications},
   3197         year = {2009},
   3198         month = apr,
   3199         pages = {73--81},
   3200         publisher = {IEEE Computer Society},
   3201         organization = {IEEE Computer Society},
   3202         address = {Rio de Janeiro, Brazil},
   3203         abstract = {In recent years there have been several large-scale deployments of P2P live video systems. Existing and future P2P live video systems will offer a large number of channels, with users switching frequently among the channels. In this paper, we develop infinite-server queueing network models to analytically study the performance of multi-channel P2P streaming systems. Our models capture essential aspects of multi-channel video systems, including peer channel switching, peer churn, peer bandwidth heterogeneity, and Zipf-like channel popularity. We apply the queueing network models to two P2P streaming designs: the isolated channel design (ISO) and the View-Upload Decoupling (VUD) design. For both of these designs, we develop efficient algorithms to calculate critical performance measures, develop an asymptotic theory to provide closed-form results when the number of peers approaches infinity, and derive near-optimal provisioning rules for assigning peers to groups in VUD. We use the analytical results to compare VUD with ISO. We show that the VUD design generally performs significantly better, particularly for systems with heterogeneous channel popularities and streaming rates},
   3204         www_section = {dblp, multi-channel, p2p streaming system},
   3205         isbn = {978-1-4244-3512-8 },
   3206         doi = {http://dx.doi.org/10.1109/INFCOM.2009.5061908},
   3207         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Queusing\%20models\%20for\%20p2p\%20streaming\%20systems.pdf},
   3208         author = {Wu, Di and Yong Liu and Keith W. Ross}
   3209 }
   3210 @conference {wpes09-bridge-attack,
   3211         title = {On the risks of serving whenever you surf: Vulnerabilities in Tor's blocking resistance design},
   3212         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2009)},
   3213         year = {2009},
   3214         month = {November},
   3215         publisher = {ACM},
   3216         organization = {ACM},
   3217         abstract = {In Tor, a bridge is a client node that volunteers to help censored users access Tor by serving as an unlisted, first-hop relay. Since bridging is voluntary, the success of this circumvention mechanism depends critically on the willingness of clients to act as bridges. We identify three key architectural shortcomings of the bridge design: (1) bridges are easy to find; (2) a bridge always accepts connections when its operator is using Tor; and (3) traffic to and from clients connected to a bridge interferes with traffic to and from the bridge operator. These shortcomings lead to an attack that can expose the IP address of bridge operators visiting certain web sites over Tor. We also discuss mitigation mechanisms},
   3218         www_section = {blocking resistance},
   3219         isbn = {978-1-60558-783-7},
   3220         doi = {10.1145/1655188.1655193},
   3221         url = {http://portal.acm.org/citation.cfm?id=1655193},
   3222         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-bridge-attack.pdf},
   3223         author = {Jon McLachlan and Nicholas J. Hopper}
   3224 }
   3225 @article {2009_12,
   3226         title = {Robust Random Number Generation for Peer-to-Peer Systems},
   3227         journal = {Theor. Comput. Sci},
   3228         volume = {410},
   3229         year = {2009},
   3230         pages = {453--466},
   3231         abstract = {We consider the problem of designing an efficient and robust distributed random number generator for peer-to-peer systems that is easy to implement and works even if all communication channels are public. A robust random number generator is crucial for avoiding adversarial join-leave attacks on peer-to-peer overlay networks. We show that our new generator together with a light-weight rule recently proposed in [B. Awerbuch, C. Scheideler, Towards a scalable and robust DHT, in: Proc. of the 18th ACM Symp. on Parallel Algorithms and Architectures, SPAA, 2006. See also http://www14.in.tum.de/personen/scheideler] for keeping peers well distributed can keep various structured overlay networks in a robust state even under a constant fraction of adversarial peers },
   3232         www_section = {Join-leave attacks, Peer-to-peer systems, Random number generation},
   3233         issn = {0304-3975},
   3234         doi = {10.1016/j.tcs.2008.10.003},
   3235         url = {http://dx.doi.org/10.1016/j.tcs.2008.10.003},
   3236         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OPODIS-116b.pdf},
   3237         author = {Awerbuch, Baruch and Scheideler, Christian}
   3238 }
   3239 @conference {1658999,
   3240         title = {Scalable landmark flooding: a scalable routing protocol for WSNs},
   3241         booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student workshop on Emerging networking experiments and technologies},
   3242         year = {2009},
   3243         pages = {1--2},
   3244         publisher = {ACM},
   3245         organization = {ACM},
   3246         address = {New York, NY, USA},
   3247         abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive tool for all kinds of applications. More advanced applications also need end-to-end routing, which goes beyond the simple data dissemination and collection mechanisms of early WSNs. The special properties of WSNs -- scarce memory, CPU, and energy resources -- make this a challenge. The Dynamic Address Routing protocol (DART) could be a good candidate for WSN routing, if it were not so prone to link outages.
   3248 
   3249 In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protocol for large WSNs. It combines ideas from landmark routing, flooding, and dynamic address routing. SLF is robust against link and node outages, requires little routing state, and generates low maintenance traffic overhead},
   3250         www_section = {wireless sensor network},
   3251         isbn = {978-1-60558-751-6},
   3252         doi = {10.1145/1658997.1658999},
   3253         url = {http://portal.acm.org/citation.cfm?id=1658997.1658999$\#$},
   3254         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09slf.pdf},
   3255         author = {Di, Pengfei and Thomas Fuhrmann}
   3256 }
   3257 @conference {ccs09-torsk,
   3258         title = {Scalable onion routing with Torsk},
   3259         booktitle = {Proceedings of CCS 2009},
   3260         year = {2009},
   3261         month = {November},
   3262         publisher = {ACM  New York, NY, USA},
   3263         organization = {ACM  New York, NY, USA},
   3264         abstract = {We introduce Torsk, a structured peer-to-peer low-latency anonymity protocol. Torsk is designed as an interoperable replacement for the relay selection and directory service of the popular Tor anonymity network that decreases the bandwidth cost of relay selection and maintenance from quadratic to quasilinear while introducing no new attacks on the anonymity provided by Tor, and no additional delay to connections made via Tor. The resulting bandwidth savings make a modest-sized Torsk network significantly cheaper to operate, and allow low-bandwidth clients to join the network.
   3265 
   3266 Unlike previous proposals for P2P anonymity schemes, Torsk does not require all users to relay traffic for others. Torsk utilizes a combination of two P2P lookup mechanisms with complementary strengths in order to avoid attacks on the confidentiality and integrity of lookups. We show by analysis that previously known attacks on P2P anonymity schemes do not apply to Torsk, and report on experiments conducted with a 336-node wide-area deployment of Torsk, demonstrating its efficiency and feasibility},
   3267         www_section = {P2P},
   3268         isbn = {978-1-60558-894-0},
   3269         doi = {10.1145/1653662.1653733},
   3270         url = {http://portal.acm.org/citation.cfm?id=1653662.1653733},
   3271         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs09-torsk.pdf},
   3272         author = {Jon McLachlan and Andrew Tran and Nicholas J. Hopper and Yongdae Kim}
   3273 }
   3274 @conference {2009_13,
   3275         title = {Security and Privacy Challenges in the Internet of Things},
   3276         booktitle = {Proceedings of KiVS Workshop on Global Sensor Networks (GSN09)},
   3277         year = {2009},
   3278         note = {http://eceasst.cs.tu-berlin.de/index.php/eceasst/article/download/208/205},
   3279         abstract = {The future Internet of Things as an intelligent collaboration of miniaturized sensors poses new challenges to security and end-user privacy. The ITU has identified that the protection of data and privacy of users is one of the key challenges in the Internet of Things [Int05]: lack of confidence about privacy will result in decreased adoption among users and therefore is one of the driving factors in the success of the Internet of Things. This paper gives an overview, categorization, and analysis of security and privacy challenges in the Internet of Things},
   3280         url = {http://doc.tm.uka.de/2009/security-gsn-camera-ready.pdf},
   3281         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gsn09-security-mayer.pdf},
   3282         author = {Mayer, Christoph P.}
   3283 }
   3284 @book {2009_14,
   3285         title = {Self-organized Data Redundancy Management for Peer-to-Peer Storage Systems},
   3286         booktitle = {Self-Organizing Systems},
   3287         series = {Lecture Notes in Computer Science},
   3288         volume = {Volume 5918/2009},
   3289         year = {2009},
   3290         pages = {65--76},
   3291         abstract = {In peer-to-peer storage systems, peers can freely join and leave the system at any time. Ensuring high data availability in such an environment is a challenging task. In this paper we analyze the costs of achieving data availability in fully decentralized peer-to-peer systems. We mainly address the problem of churn and what effect maintaining availability has on network bandwidth. We discuss two different redundancy techniques -- replication and erasure coding -- and consider their monitoring and repairing costs analytically. We calculate the bandwidth costs using basic cost equations and two different Markov reward models: one for a centralized monitoring system and the other for distributed monitoring. We show a comparison of the numerical results accordingly. Based on these results, we determine the best redundancy and maintenance strategy that corresponds to the peers' failure probability},
   3292         www_section = {distributed storage, Markov chain},
   3293         isbn = {978-3-642-10864-8},
   3294         doi = {10.1007/978-3-642-10865-5},
   3295         url = {http://www.springerlink.com/content/28660w27373vh408/},
   3296         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext3.pdf},
   3297         publisher = {unknown},
   3298         author = {Yaser Houri and Manfred Jobmann and Thomas Fuhrmann}
   3299 }
   3300 @conference {2009_15,
   3301         title = {ShadowWalker: Peer-to-peer Anonymous Communication Using Redundant Structured Topologies},
   3302         booktitle = {Proceedings of the 16th ACM Conference on Computer and Communications Security},
   3303         year = {2009},
   3304         publisher = {ACM},
   3305         organization = {ACM},
   3306         address = {New York, NY, USA},
   3307         abstract = {Peer-to-peer approaches to anonymous communication
   3308 promise to eliminate the scalability concerns and central
   3309 vulnerability points of current networks such as Tor. However, the
   3310 P2P setting introduces many new opportunities for attack, and
   3311 previous designs do not provide an adequate level of anonymity.
   3312 We propose ShadowWalker: a new low-latency P2P anonymous
   3313 communication system, based on a random walk over a redundant
   3314 structured topology. We base our design on shadows that
   3315 redundantly check and certify neighbor information; these
   3316 certifications enable nodes to perform random walks over the
   3317 structured topology while avoiding route capture and other
   3318 attacks.
   3319 We analytically calculate the anonymity provided by ShadowWalker
   3320 and show that it performs well for moderate levels of attackers,
   3321 and is much better than the state of the art.
   3322 We also design an extension that improves forwarding performance
   3323 at a slight anonymity cost, while at the same time protecting
   3324 against selective DoS attacks. We show that our system has
   3325 manageable overhead and can handle moderate churn, making it an
   3326 attractive new design for P2P anonymous
   3327 communication},
   3328         www_section = {anonymity, peer-to-peer, random walks},
   3329         isbn = {978-1-60558-894-0},
   3330         doi = {10.1145/1653662.1653683},
   3331         url = {http://doi.acm.org/10.1145/1653662.1653683},
   3332         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shadowwalker-ccs09.pdf},
   3333         author = {Mittal, Prateek and Borisov, Nikita}
   3334 }
   3335 @article {2009_16,
   3336         title = {A Software and Hardware IPTV Architecture for Scalable DVB Distribution},
   3337         journal = {International Journal of Digital Multimedia Broadcasting},
   3338         volume = {2009},
   3339         year = {2009},
   3340         abstract = {Many standards and even more proprietary technologies deal with IP-based television (IPTV). But none of them can transparently map popular public broadcast services such as DVB or ATSC to IPTV with acceptable effort. In this paper we explain why we believe that such a mapping using a lightweight framework is an important step towards all-IP multimedia. We then present the NetCeiver architecture: it is based on well-known standards such as IPv6, and it allows zero configuration. The use of multicast streaming makes NetCeiver highly scalable. We also describe a low-cost FPGA implementation of the proposed NetCeiver architecture, which can concurrently stream services from up to six full transponders},
   3341         www_section = {DVB, IPTV, multicast},
   3342         author = {unknown},
   3343         doi = {10.1155/2009/617203},
   3344         url = {http://www.hindawi.com/journals/ijdmb/2009/617203.html},
   3345         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/617203.pdf},
   3346         editor = {Georg Acher and Detlef Fliegl and Thomas Fuhrmann}
   3347 }
   3348 @mastersthesis {2009_17,
   3349         title = {Solving very large distributed constraint satisfaction problems},
   3350         volume = {Doctor of Philosophy},
   3351         year = {2009},
   3352         month = dec,
   3353         pages = {0--211},
   3354         school = {University of Wollongong, New South Wales, Australia},
   3355         type = {PhD},
   3356         address = {Wollongong, New South Wales, Australia},
   3357         abstract = {This thesis investigates issues with existing approaches to distributed constraint satisfaction, and proposes a solution in the form of a new algorithm. These issues are most evident when solving large distributed constraint satisfaction problems, hence the title of the thesis.
   3358 We will first survey existing algorithms for centralised constraint satisfaction, and describe how they have been modified to handle distributed constraint satisfaction. The method by which each algorithm achieves completeness will be investigated and analysed by application of a new theorem.
   3359 We will then present a new algorithm, Support-Based Distributed Search, developed explicitly for distributed constraint satisfaction rather than being derived from centralised algorithms. This algorithm is inspired by the inherent structure of human arguments and similar mechanisms we observe in real-world negotiations.
   3360 A number of modifications to this new algorithm are considered, and comparisons are made with existing algorithms, effectively demonstrating its place within the field. Empirical analysis is then conducted, and comparisons are made to state-of-the-art algorithms most able to handle large distributed constraint satisfaction problems.
   3361 Finally, it is argued that any future development in distributed constraint satisfaction will necessitate changes in the algorithms used to solve small {\textquoteleft}embedded' constraint satisfaction problems. The impact on embedded constraint satisfaction problems is considered, with a brief presentation of an improved algorithm for hypertree decomposition},
   3362         www_section = {algorithms, distributed constraint satisfaction},
   3363         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20P.Harvey.pdf},
   3364         author = {Peter Harvey}
   3365 }
   3366 @conference {DBLP:conf/sp/DanezisG09,
   3367         title = {Sphinx: A Compact and Provably Secure Mix Format},
   3368         booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P 2009), 17-20 May, Oakland, California, USA},
   3369         year = {2009},
   3370         pages = {269--282},
   3371         publisher = {IEEE Computer Society},
   3372         organization = {IEEE Computer Society},
   3373         abstract = {Sphinx is a cryptographic message format used to relay anonymized messages within a mix network. It is more compact than any comparable scheme, and supports a full set of security features: indistinguishable replies, hiding the path length and relay position, as well as providing unlinkability for each leg of the message's journey over the network. We prove the full cryptographic security of Sphinx in the random oracle model, and we describe how it can be used as an efficient drop-in replacement in deployed remailer systems},
   3374         www_section = {anonymity, cryptography},
   3375         isbn = {978-0-7695-3633-0},
   3376         doi = {10.1109/SP.2009.15},
   3377         url = {http://portal.acm.org/citation.cfm?id=1607723.1608138},
   3378         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisG09.pdf},
   3379         author = {George Danezis and Ian Goldberg}
   3380 }
   3381 @conference {2009_18,
   3382         title = {SPINE: Adaptive Publish/Subscribe for Wireless Mesh Networks},
   3383         booktitle = {Proc of the 8th IEEE International Conference on Innovative Internet Community Systems (I2CS 2008)},
   3384         year = {2009},
   3385         abstract = {Application deployment on Wireless Mesh Networks (WMNs) is a challenging issue. First, it requires communication abstractions that allow for interoperation with Internet applications, and second, the offered solution should be sensitive to the available resources in the underlying network. Loosely coupled communication abstractions, like publish/subscribe, promote interoperability, but unfortunately are typically implemented at the application layer without considering the available resources at the underlay, imposing a significant degradation of application performance in the setting of Wireless Mesh Networks. In this paper we present SPINE, a content-based publish/subscribe system, which considers the particular challenges of deploying application-level services in Wireless Mesh Networks. SPINE is designed to reduce the overhead which stems from both publications and reconfigurations, to cope with the inherent capacity limitations on communication links as well as with mobility of the wireless mesh clients. We demonstrate the effectiveness of SPINE by comparison with traditional approaches in implementing content-based publish/subscribe},
   3386         www_section = {mesh networks, publish/subscribe},
   3387         url = {http://studia.complexica.net/index.php?option=com_content\&view=article\&id=116\%3Aspine--adaptive-publishsubscribe-for-wireless-mesh-networks-pp-320-353\&catid=47\%3Anumber-3\&Itemid=89\&lang=fr},
   3388         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RI070302.pdf},
   3389         author = {Jorge Alfonso Briones-Garc{\'\i}a and Boris Koldehofe and Kurt Rothermel}
   3390 }
   3391 @booklet {2009_19,
   3392         title = {SpoVNet Security Task Force Report},
   3393         volume = {ISSN 1613-849X},
   3394         number = {TM-2009-2},
   3395         year = {2009},
   3396         publisher = {Institute of Telematics, Universit{\"a}t Karlsruhe (TH)},
   3397         type = {Telematics Technical Report},
   3398         url = {http://doc.tm.uka.de/2009/TM-2009-3.pdf},
   3399         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TM-2009-3.pdf},
   3400         author = {Ralph Holz and Mayer, Christoph P. and Sebastian Mies and Heiko Niedermayer and Tariq, Muhammad Adnan}
   3401 }
   3402 @conference {DBLP:conf/infocom/LandaGCMR09,
   3403         title = {A Sybilproof Indirect Reciprocity Mechanism for Peer-to-Peer Networks},
   3404         booktitle = {INFOCOM 2009. The 28th IEEE International Conference on Computer Communications},
   3405         year = {2009},
   3406         month = apr,
   3407         pages = {343--351},
   3408         publisher = {IEEE Computer Society},
   3409         organization = {IEEE Computer Society},
   3410         address = {Rio de Janeiro, Brazil},
   3411         abstract = {Although direct reciprocity (Tit-for-Tat) contribution systems have been successful in reducing free-loading in peer-to-peer overlays, it has been shown that, unless the contribution network is dense, they tend to be slow (or may even fail) to converge [1]. On the other hand, current indirect reciprocity mechanisms based on reputation systems tend to be susceptible to sybil attacks, peer slander and whitewashing. In this paper we present PledgeRoute, an accounting mechanism for peer contributions that is based on social capital. This mechanism allows peers to contribute resources to one set of peers and use this contribution to obtain services from a different set of peers, at a different time. PledgeRoute is completely decentralised, can be implemented in both structured and unstructured peer-to-peer systems, and it is resistant to the three kinds of attacks mentioned above. To achieve this, we model contribution transitivity as a routing problem in the contribution network of the peer-to-peer overlay, and we present arguments for the routing behaviour and the sybilproofness of our contribution transfer procedures on this basis. Additionally, we present mechanisms for the seeding of the contribution network, and a combination of incentive mechanisms and reciprocation policies that motivate peers to adhere to the protocol and maximise their service contributions to the overlay},
   3412         www_section = {p2p network, reciprocity mechanism, sybilproof},
   3413         isbn = {978-1-4244-3512-8},
   3414         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Sybilproof\%20Indirect\%20Reprocity\%20Mechanism\%20for\%20P2P\%20Networks\%20.pdf},
   3415         author = {Raul Leonardo Landa Gamiochipi and David Griffin and Richard G. Clegg and Eleni Mykoniati and Miguel Rio}
   3416 }
   3417 @conference {Resnick:2009:STT:1566374.1566423,
   3418         title = {Sybilproof Transitive Trust Protocols},
   3419         booktitle = {EC'09. Proceedings of the 10th ACM Conference on Electronic commerce},
   3420         series = {EC '09},
   3421         year = {2009},
   3422         month = jul,
   3423         pages = {345--354},
   3424         publisher = {ACM},
   3425         organization = {ACM},
   3426         address = {Stanford, California, USA},
   3427         abstract = {We study protocols to enable one user (the principal) to make potentially profitable but risky interactions with another user (the agent), in the absence of direct trust between the two parties. In such situations, it is possible to enable the interaction indirectly through a chain of credit or "trust" links. We introduce a model that provides insight into many disparate applications, including open currency systems, network trust aggregation systems, and manipulation-resistant recommender systems. Each party maintains a trust account for each other party. When a principal's trust balance for an agent is high enough to cover potential losses from a bad interaction, direct trust is sufficient to enable the interaction. Allowing indirect trust opens up more interaction opportunities, but also expands the strategy space of an attacker seeking to exploit the community for its own ends. We show that with indirect trust exchange protocols, some friction is unavoidable: any protocol that satisfies a natural strategic safety property that we call sum-sybilproofness can sometimes lead to a reduction in expected overall trust balances even on interactions that are profitable in expectation. Thus, for long-term growth of trust accounts, which are assets enabling risky but valuable interactions, it may be necessary to limit the use of indirect trust. We present the hedged-transitive protocol and show that it achieves the optimal rate of expected growth in trust accounts, among all protocols satisfying the sum-sybilproofness condition},
   3428         www_section = {indirect reciprocity, open currency, recommender system, reputation system, sybilproof, transitive trust},
   3429         isbn = {978-1-60558-458-4},
   3430         doi = {http://doi.acm.org/10.1145/1566374.1566423},
   3431         url = {http://doi.acm.org/10.1145/1566374.1566423},
   3432         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2709\%20-\%20Sybilproof\%20transitive\%20trust\%20protocols.pdf},
   3433         author = {Paul Resnick and Sami, Rahul}
   3434 }
   3435 @mastersthesis {Douglas-thesis,
   3436         title = {A taxonomy for and analysis of anonymous communications networks},
   3437         year = {2009},
   3438         month = mar,
   3439         school = {Air Force Institute of Technology},
   3440         type = {PhD},
   3441         abstract = {Any entity operating in cyberspace is susceptible to debilitating attacks. With cyber attacks intended to gather intelligence and disrupt communications rapidly replacing the threat of conventional and nuclear attacks, a new age of warfare is at hand. In 2003, the United States acknowledged that the speed and anonymity of cyber attacks make distinguishing among the actions of terrorists, criminals, and nation states difficult. Even President Obama's Cybersecurity Chief-elect feels challenged by the increasing sophistication of cyber attacks. Indeed, the rising quantity and ubiquity of new surveillance technologies in cyberspace enables instant, undetectable, and unsolicited information collection about entities. Hence, anonymity and privacy are becoming increasingly important issues. Anonymization enables entities to protect their data and systems from a diverse set of cyber attacks and preserve privacy. This research provides a systematic analysis of anonymity degradation, preservation and elimination in cyberspace to enhance the security of information assets. This includes discovery/obfuscation of identities and actions of/from potential adversaries. First, novel taxonomies are developed for classifying and comparing the wide variety of well-established and state-of-the-art anonymous networking protocols. These expand the classical definition of anonymity and are the first known to capture the peer-to-peer and mobile ad hoc anonymous protocol family relationships. Second, a unique synthesis of state-of-the-art anonymity metrics is provided. This significantly aids an entity's ability to reliably measure changing anonymity levels; thereby increasing their ability to defend against cyber attacks. Finally, a novel epistemic-based model is created to characterize how an adversary reasons with knowledge to degrade anonymity},
   3442         url = {http://oai.dtic.mil/oai/oai?verb=getRecord\&metadataPrefix=html\&identifier=ADA495688},
   3443         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Douglas-thesis.pdf},
   3444         author = {Douglas Kelly}
   3445 }
   3446 @conference {2009_20,
   3447         title = {Towards End-to-End Connectivity for Overlays across Heterogeneous Networks},
   3448         booktitle = {Proc. Int. Workshop on the Network of the Future (Future-Net 2009), co-located with IEEE Int. Conf. on Communications (ICC 2009)},
   3449         year = {2009},
   3450         address = {Dresden, Germany},
   3451         abstract = {The incremental adoption of IPv6, middle boxes (e.g., NATs, Firewalls) as well as completely new network types and protocols paint a picture of a future Internet that consists of extremely heterogeneous edge networks (e.g. IPv4, IPv6, industrial Ethernet, sensor networks) that are not supposed or able to communicate directly. This increasing heterogeneity imposes severe challenges for overlay networks, which are considered as a potential migration strategy towards the future Internet since they can add new functionality and services in a distributed and self-organizing manner. Unfortunately, overlays are based on end-to-end connectivity and, thus, their deployment is hindered by network heterogeneity. In this paper, we take steps towards a solution to enable overlay connections in such heterogeneous networks, building upon a model of heterogeneous networks that comprises several connectivity domains with direct connectivity, interconnected by relays. As a major contribution, we present a distributed protocol that detects the boundaries of connectivity domains as well as relays using a gossiping approach. Furthermore, the protocol manages unique identifiers of connectivity domains and efficiently handles domain splitting and merging due to underlay changes. Simulation studies indicate that the algorithm can handle splitting and merging of connectivity domains in reasonable time and is scalable with respect to control overhead},
   3452         isbn = {978-1-4244-3437-4},
   3453         doi = {10.1109/ICCW.2009.5207975},
   3454         url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5207975},
   3455         author = {Sebastian Mies and Oliver Waldhorst and Hans Wippel}
   3456 }
   3457 @conference {2009_21,
   3458         title = {Traffic Engineering vs. Content Distribution: A Game Theoretic Perspective},
   3459         booktitle = {INFOCOM 2009. The 28th IEEE International Conference on Computer Communications},
   3460         year = {2009},
   3461         month = apr,
   3462         pages = {540--548},
   3463         publisher = {IEEE Computer Society},
   3464         organization = {IEEE Computer Society},
   3465         address = {Rio de Janeiro},
   3466         abstract = {In this paper we explore the interaction between content distribution and traffic engineering. Because a traffic engineer may be unaware of the structure of content distribution systems or overlay networks, this management of the network does not fully anticipate how traffic might change as a result of his actions. Content distribution systems that assign servers at the application level can respond very rapidly to changes in the routing of the network. Consequently, the traffic engineer's decisions may almost never be applied to the intended traffic. We use a game-theoretic framework in which infinitesimal users of a network select the source of content, and the traffic engineer decides how the traffic will route through the network. We formulate a game and prove the existence of equilibria. Additionally, we present a setting in which equilibria are socially optimal, essentially unique, and stable. Conditions under which efficiency loss may be bounded are presented, and the results are extended to the cases of general overlay networks and multiple autonomous systems},
   3467         www_section = {content distribution, traffic engineering},
   3468         isbn = {978-1-4244-3512-8},
   3469         doi = {10.1109/INFCOM.2009.5061960},
   3470         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Traffic\%20Engineering\%20vs.\%20Content\%20Distribution.PDF},
   3471         author = {Dominic DiPalantino and Ramesh Johari}
   3472 }
   3473 @conference {morphing09,
   3474         title = {Traffic Morphing: An efficient defense against statistical traffic analysis},
   3475         booktitle = {Proceedings of the Network and Distributed Security Symposium--{NDSS} '09},
   3476         year = {2009},
   3477         month = feb,
   3478         publisher = {IEEE},
   3479         organization = {IEEE},
   3480         abstract = {Recent work has shown that properties of network traffic that remain observable after encryption, namely packet sizes and timing, can reveal surprising information about the traffic's contents (e.g., the language of a VoIP call [29], passwords in secure shell logins [20], or even web browsing habits [21, 14]). While there are some legitimate uses for encrypted traffic analysis, these techniques also raise important questions about the privacy of encrypted communications. A common tactic for
   3481 mitigating such threats is to pad packets to uniform sizes or to send packets at fixed timing intervals; however, this approach is often inefficient. In this paper, we propose a novel method for thwarting statistical traffic analysis
   3482 algorithms by optimally morphing one class of traffic to look like another class. Through the use of convex optimization
   3483 techniques, we show how to optimally modify packets in real-time to reduce the accuracy of a variety of traffic classifiers while incurring much less overhead than padding. Our evaluation of this technique against two published traffic classifiers for VoIP [29] and web traffic [14] shows that morphing works well on a wide range of network data{\textemdash}in some cases, simultaneously providing better privacy and lower overhead than na{\"\i}ve defenses},
   3484         www_section = {privacy, traffic analysis, VoIP},
   3485         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphing09.pdf},
   3486         author = {Charles Wright and Scott Coull and Fabian Monrose}
   3487 }
   3488 @book {2009_22,
   3489         title = {Tuning Vivaldi: Achieving Increased Accuracy and Stability},
   3490         booktitle = {Self-Organizing Systems},
   3491         series = {Lecture Notes in Computer Science},
   3492         volume = {5918},
   3493         year = {2009},
   3494         pages = {174--184},
   3495         abstract = {Network Coordinates are a basic building block for most peer-to-peer applications nowadays. They optimize the peer selection process by allowing the nodes to preferably attach to peers to whom they then experience a low round trip time. Albeit there has been substantial research effort in this topic over the last years, the optimization of the various network coordinate algorithms has not been pursued systematically yet. Analyzing the well-known Vivaldi algorithm and its proposed optimizations with several sets of extensive Internet traffic traces, we found that in face of current Internet data most of the parameters that have been recommended in the original papers are a magnitude too high. Based on this insight, we recommend modified parameters that improve the algorithms' performance significantly},
   3496         isbn = {978-3-642-10864-8},
   3497         issn = {0302-9743},
   3498         doi = {10.1007/978-3-642-10865-5},
   3499         url = {http://www.springerlink.com/content/h7r3q58251x72155/},
   3500         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext.pdf},
   3501         publisher = {unknown},
   3502         author = {Benedikt Elser and Andreas F{\"o}rschler and Thomas Fuhrmann}
   3503 }
   3504 @conference {1582481,
   3505         title = {Using link-layer broadcast to improve scalable source routing},
   3506         booktitle = {IWCMC '09: Proceedings of the 2009 International Conference on Wireless Communications and Mobile Computing},
   3507         year = {2009},
   3508         month = jan,
   3509         pages = {466--471},
   3510         publisher = {ACM},
   3511         organization = {ACM},
   3512         address = {New York, NY, USA},
   3513         abstract = {Scalable source routing (SSR) is a network layer routing protocol that provides services that are similar to those of structured peer-to-peer overlays.
   3514 
   3515 In this paper, we describe several improvements to the SSR protocol. They aim at providing nodes with more up-to-date routing information: 1. The use of link-layer broadcast enables all neighbors of a node to contribute to the forwarding process. 2. A light-weight and fast selection mechanism avoids packet duplication and optimizes the source route iteratively. 3. Nodes implicitly learn the network's topology from overheard broadcast messages.
   3516 
   3517 We present simulation results which show the performance gain of the proposed improvements: 1. The delivery ratio in settings with high mobility increases. 2. The required per-node state can be reduced as compared with the original SSR protocol. 3. The route stretch decreases. --- These improvements are achieved without increasing the routing overhead},
   3518         www_section = {mobile Ad-hoc networks, P2P, routing, scalable source routing},
   3519         isbn = {978-1-60558-569-7},
   3520         doi = {10.1145/1582379.1582481},
   3521         url = {http://portal.acm.org/citation.cfm?id=1582481$\#$},
   3522         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09broadcastssr.pdf},
   3523         author = {Di, Pengfei and Thomas Fuhrmann}
   3524 }
   3525 @conference {ccsw09-fingerprinting,
   3526         title = {Website fingerprinting: attacking popular privacy enhancing technologies with the multinomial naive-bayes classifier},
   3527         booktitle = {Proceedings of the 2009 ACM workshop on Cloud computing security (CCSW '09)},
   3528         year = {2009},
   3529         pages = {31--42},
   3530         publisher = {ACM},
   3531         organization = {ACM},
   3532         address = {New York, NY, USA},
   3533         abstract = {Privacy enhancing technologies like OpenSSL, OpenVPN or Tor establish an encrypted tunnel that enables users to hide content and addresses of requested websites from external observers. This protection is endangered by local traffic analysis attacks that allow an external, passive attacker between the PET system and the user to uncover the identity of the requested sites. However, existing proposals for such attacks are not practicable yet.
   3534 
   3535 We present a novel method that applies common text mining techniques to the normalised frequency distribution of observable IP packet sizes. Our classifier correctly identifies up to 97\% of requests on a sample of 775 sites and over 300,000 real-world traffic dumps recorded over a two-month period. It outperforms previously known methods like Jaccard's classifier and Na{\"\i}ve Bayes that neglect packet frequencies altogether or rely on absolute frequency values, respectively. Our method is system-agnostic: it can be used against any PET without alteration. Closed-world results indicate that many popular single-hop and even multi-hop systems like Tor and JonDonym are vulnerable against this general fingerprinting attack. Furthermore, we discuss important real-world issues, namely false alarms and the influence of the browser cache on accuracy},
   3536         www_section = {forensics, latency, text mining, traffic analysis},
   3537         isbn = {978-1-60558-784-4},
   3538         doi = {10.1145/1655008.1655013},
   3539         url = {http://portal.acm.org/citation.cfm?id=1655013\&dl=GUIDE\&coll=GUIDE\&CFID=83763210\&CFTOKEN=75697565},
   3540         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccsw09-fingerprinting.pdf},
   3541         author = {Dominik Herrmann and Rolf Wendolsky and Hannes Federrath}
   3542 }
   3543 @conference {1590633,
   3544         title = {Wireless Sensor Networks: A Survey},
   3545         booktitle = {WAINA '09: Proceedings of the 2009 International Conference on Advanced Information Networking and Applications Workshops},
   3546         year = {2009},
   3547         pages = {636--641},
   3548         publisher = {IEEE Computer Society},
   3549         organization = {IEEE Computer Society},
   3550         address = {Washington, DC, USA},
   3551         abstract = {Wireless Sensor Networks (WSN), an element of pervasive computing, are presently being used on a large scale to monitor real-time environmental status. However, these sensors operate under extreme energy constraints and are designed by keeping an application in mind. Designing a new wireless sensor node is an extremely challenging task and involves assessing a number of different parameters required by the target application, which includes range, antenna type, target technology, components, memory, storage, power, life time, security, computational capability, communication technology, power, size, programming interface and applications. This paper analyses commercially available wireless sensor nodes (and research prototypes) based on these parameters and outlines research directions in this area},
   3552         www_section = {FPGA, wireless sensor network},
   3553         isbn = {978-0-7695-3639-2},
   3554         doi = {10.1109/WAINA.2009.192},
   3555         url = {http://portal.acm.org/citation.cfm?id=1588304.1590633$\#$},
   3556         author = {Potdar, Vidyasagar and Sharif, Atif and Chang, Elizabeth}
   3557 }
   3558 @conference {DBLP:conf/esorics/DanezisDKT09,
   3559         title = {The Wisdom of Crowds: Attacks and Optimal Constructions},
   3560         booktitle = {Proceedings of the 14th European Symposium on Research in Computer Security (ESORICS 2009), Saint-Malo, France, September 21-23},
   3561         series = {Lecture Notes in Computer Science},
   3562         volume = {5789},
   3563         year = {2009},
   3564         pages = {406--423},
   3565         publisher = {Springer},
   3566         organization = {Springer},
   3567         abstract = {We present a traffic analysis of the ADU anonymity scheme presented at ESORICS 2008, and the related RADU scheme. We show that optimal attacks are able to de-anonymize messages more effectively than believed before. Our analysis applies to single messages as well as long term observations using multiple messages. The search of a {\textquotedblleft}better{\textquotedblright} scheme is bound to fail, since we prove that the original Crowds anonymity system provides the best security for any given mean messaging latency. Finally we present D-Crowds, a scheme that supports any path length distribution, while leaking the least possible information, and quantify the optimal attacks against it},
   3568         www_section = {anonymity, Crowds, traffic analysis},
   3569         isbn = {978-3-642-04443-4},
   3570         doi = {10.1007/978-3-642-04444-1},
   3571         url = {http://www.springerlink.com/content/t6q86u137t4762k8/},
   3572         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisDKT09.pdf},
   3573         author = {George Danezis and Claudia Diaz and Emilia K{\"a}sper and Carmela Troncoso},
   3574         editor = {Michael Backes and Peng Ning}
   3575 }
   3576 @conference {wpes09-xpay,
   3577         title = {XPay: Practical anonymous payments for Tor routing and other networked services},
   3578         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2009)},
   3579         year = {2009},
   3580         month = {November},
   3581         publisher = {ACM},
   3582         organization = {ACM},
   3583         abstract = {We design and analyze the first practical anonymous payment mechanisms for network services. We start by reporting on our experience with the implementation of a routing micropayment solution for Tor. We then propose micropayment protocols of increasingly complex requirements for networked services, such as P2P or cloud-hosted services.
   3584 
   3585 The solutions are efficient, with bandwidth and latency overheads of under 4\% and 0.9 ms respectively (in ORPay for Tor), provide full anonymity (both for payers and payees), and support thousands of transactions per second},
   3586         www_section = {anonymity, onion routing, payment, privacy},
   3587         isbn = {978-1-60558-783-7},
   3588         doi = {10.1145/1655188.1655195},
   3589         url = {http://portal.acm.org/citation.cfm?id=1655188.1655195},
   3590         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-xpay.pdf},
   3591         author = {Yao Chen and Radu Sion and Bogdan Carbunar}
   3592 }
   3593 @conference {2008_0,
   3594         title = {AmbiComp: A platform for distributed execution of Java programs on embedded systems by offering a single system image},
   3595         booktitle = {AmI-Blocks'08, European Conference on Ambient Intelligence 2008},
   3596         year = {2008},
   3597         month = jan,
   3598         abstract = {Ambient Intelligence pursues the vision that small networked computers will jointly perform tasks that create the illusion of an intelligent environment. One of the most pressing challenges in this context is the question how one could easily develop software for such highly complex, but resource-scarce systems. In this paper we present a snapshot of our ongoing work towards facilitating software development for Ambient Intelligence systems. In particular, we present the AmbiComp [1] platform. It consists of small, modular hardware, a
   3599 flexible firmware including a Java Virtual Machine, and an Eclipse-based integrated development environment},
   3600         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/publ_2008_eickhold-fuhrmann-saballus-ua_ambicomp.pdf},
   3601         author = {Johannes Eickhold and Thomas Fuhrmann and Bjoern Saballus and Sven Schlender and Thomas Suchy}
   3602 }
   3603 @conference {2008_1,
   3604         title = {Analyzing Unreal Tournament 2004 Network Traffic Characteristics},
   3605         booktitle = {CGAT'08 Singapore, 28th-30th},
   3606         year = {2008},
   3607         abstract = {With increasing availability of high-speed access links in the private sector, online real-time gaming has become a major and still growing segment in terms of market and network impact today. One of the most popular games is Unreal Tournament 2004, a fast-paced action game that still ranks within the top 10 of the most-played multiplayer Internet-games, according to GameSpy [1]. Besides high demands in terms of graphical computation, games like Unreal also impose hard requirements regarding network packet delay and jitter, for small deterioration in these conditions influences gameplay recognizably. To make matters worse, such games generate a very specific network traffic with strong requirements in terms of data delivery. In this paper, we analyze the network traffic characteristics of Unreal Tournament 2004. The experiments include different aspects like variation of map sizes, player count, player behavior as well as hardware and game-specific configuration. We show how different operating systems influence network behavior of the game. Our work gives a promising picture of how the specific real-time game behaves in terms of network impact and may be used as a basis e.g. for the development of specialized traffic generators},
   3608         url = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=295},
   3609         author = {H{\"u}bsch, Christian}
   3610 }
   3611 @conference {Zivan:2008:ALS:1402821.1402895,
   3612         title = {Anytime local search for distributed constraint optimization},
   3613         booktitle = {AAMAS'08--Proceedings of the 7th international joint conference on Autonomous agents and multiagent systems },
   3614         series = {AAMAS '08},
   3615         year = {2008},
   3616         month = may,
   3617         pages = {1449--1452},
   3618         publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
   3619         organization = {International Foundation for Autonomous Agents and Multiagent Systems},
   3620         address = {Estoril, Portugal},
   3621         abstract = {Most former studies of Distributed Constraint Optimization Problems (DisCOPs) search considered only complete search algorithms, which are practical only for relatively small problems. Distributed local search algorithms can be used for solving DisCOPs. However, because of the differences between the global evaluation of a system's state and the private evaluation of states by agents, agents are unaware of the global best state which is explored by the algorithm. Previous attempts to use local search algorithms for solving DisCOPs reported the state held by the system at the termination of the algorithm, which was not necessarily the best state explored.
   3622 
   3623 A general framework for implementing distributed local search algorithms for DisCOPs is proposed. The proposed framework makes use of a BFS-tree in order to accumulate the costs of the system's state in its different steps and to propagate the detection of a new best step when it is found. The resulting framework enhances local search algorithms for DisCOPs with the anytime property. The proposed framework does not require additional network load. Agents are required to hold a small (linear) additional space (beside the requirements of the algorithm in use). The proposed framework preserves privacy at a higher level than complete DisCOP algorithms which make use of a pseudo-tree (ADOPT, DPOP)},
   3624         www_section = {algorithms, BFS-Tree, DCOP, DisCOPs, framework},
   3625         isbn = {978-0-9817381-2-3},
   3626         url = {http://dl.acm.org/citation.cfm?id=1402821.1402895},
   3627         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAAI\%2708\%20-\%20Local\%20search\%20for\%20DCOP.pdf},
   3628         author = {Zivan, Roie}
   3629 }
   3630 @booklet {WongSirer2008ApproximateMatching,
   3631         title = {Approximate Matching for Peer-to-Peer Overlays with Cubit},
   3632         year = {2008},
   3633         publisher = {Cornell University, Computing and Information Science Technical Report},
   3634         abstract = {Keyword search is a critical component in most content retrieval systems. Despite the emergence of completely decentralized and efficient peer-to-peer techniques for content distribution, there have not been similarly efficient, accurate, and decentralized mechanisms for content discovery based on approximate search keys. In this paper, we present a scalable and efficient peer-to-peer system called Cubit with a new search primitive that can efficiently find the k data items with keys most similar to a given search key. The system works by creating a keyword metric space that encompasses both the nodes and the objects in the system, where the distance between two points is a measure of the similarity between the strings that the points represent. It provides a loosely-structured overlay that can efficiently navigate this space. We evaluate Cubit through both a real deployment as a search plugin for a popular BitTorrent client and a large-scale simulation and show that it provides an efficient, accurate and robust method to handle imprecise string search in file sharing applications},
   3635         www_section = {distributed hash table, p2psockets},
   3636         author = {Bernard Wong and Aleksandrs Slivkins and Emin G{\"u}n Sirer}
   3637 }
   3638 @conference {Yang:2008:ABD:1403027.1403032,
   3639         title = {Auction, but don't block},
   3640         booktitle = {NetEcon'08. Proceedings of the 3rd International Workshop on Economics of Networked Systems},
   3641         series = {NetEcon '08},
   3642         year = {2008},
   3643         month = aug,
   3644         pages = {19--24},
   3645         publisher = {ACM},
   3646         organization = {ACM},
   3647         address = {Seattle, WA, USA},
   3648         abstract = {This paper argues that ISPs' recent actions to block certain applications (e.g. BitTorrent) and attempts to differentiate traffic could be a signal of bandwidth scarcity. Bandwidth-intensive applications such as VoD could have driven the traffic demand to the capacity limit of their networks. This paper proposes to let ISPs auction their bandwidth, instead of blocking or degrading applications. A user places a bid in a packet header based on how much he values the communication. When congestion occurs, ISPs allocate bandwidth to those users that value their packets the most, and charge them the Vickrey auction price. We outline a design that addresses the technical challenges to support this auction and analyze its feasibility. Our analysis suggests that the design has reasonable overhead and could be feasible with modern hardware},
   3649         www_section = {auction, Internet, net-neutrality},
   3650         isbn = {978-1-60558-179-8},
   3651         doi = {http://doi.acm.org/10.1145/1403027.1403032},
   3652         url = {http://doi.acm.org/10.1145/1403027.1403032},
   3653         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2708\%20-\%20Yang\%20-\%20Auction\%2C\%20but\%20don\%27t\%20block.pdf},
   3654         author = {Yang, Xiaowei}
   3655 }
   3656 @conference {1387603,
   3657         title = {BFT protocols under fire},
   3658         booktitle = {NSDI'08: Proceedings of the 5th USENIX Symposium on Networked Systems Design and Implementation},
   3659         year = {2008},
   3660         pages = {189--204},
   3661         publisher = {USENIX Association},
   3662         organization = {USENIX Association},
   3663         address = {Berkeley, CA, USA},
   3664         abstract = {Much recent work on Byzantine state machine replication focuses on protocols with improved performance under benign conditions (LANs, homogeneous replicas, limited crash faults), with relatively little evaluation under typical, practical conditions (WAN delays, packet loss, transient disconnection, shared resources). This makes it difficult for system designers to choose the appropriate protocol for a real target deployment. Moreover, most protocol implementations differ in their choice of runtime environment, crypto library, and transport, hindering direct protocol comparisons even under similar conditions.
   3665 
   3666 We present a simulation environment for such protocols that combines a declarative networking system with a robust network simulator. Protocols can be rapidly implemented from pseudocode in the high-level declarative language of the former, while network conditions and (measured) costs of communication packages and crypto primitives can be plugged into the latter. We show that the resulting simulator faithfully predicts the performance of native protocol implementations, both as published and as measured in our local network.
   3667 
   3668 We use the simulator to compare representative protocols under identical conditions and rapidly explore the effects of changes in the costs of crypto operations, workloads, network conditions and faults. For example, we show that Zyzzyva outperforms protocols like PBFT and Q/U under most but not all conditions, indicating that one-size-fits-all protocols may be hard if not impossible to design in practice},
   3669         isbn = {111-999-5555-22-1},
   3670         url = {http://portal.acm.org/citation.cfm?id=1387603$\#$},
   3671         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BFTSim-nsdi08.pdf},
   3672         author = {Singh, Atul and Das, Tathagata and Maniatis, Petros and Peter Druschel and Roscoe, Timothy}
   3673 }
   3674 @conference {bauer:alpaca2008,
   3675         title = {BitBlender: Light-Weight Anonymity for BitTorrent},
   3676         booktitle = {Proceedings of the Workshop on Applications of Private and Anonymous Communications (AlPACa 2008)},
   3677         year = {2008},
   3678         month = {September},
   3679         publisher = {ACM},
   3680         organization = {ACM},
   3681         address = {Istanbul, Turkey},
   3682         abstract = {We present BitBlender, an efficient protocol that provides an anonymity layer for BitTorrent traffic. BitBlender works by creating an ad-hoc multi-hop network consisting of special peers called "relay peers" that proxy requests and replies on behalf of other peers. To understand the effect of introducing relay peers into the BitTorrent system architecture, we provide an analysis of the expected path lengths as the ratio of relay peers to normal peers varies. A prototype is implemented and experiments are conducted on Planetlab to quantify the performance overhead associated with the protocol. We also propose protocol extensions to add confidentiality and access control mechanisms, countermeasures against traffic analysis attacks, and selective caching policies that simultaneously increase both anonymity and performance. We finally discuss the potential legal obstacles to deploying an anonymous file sharing protocol. This work is among the first to propose a privacy enhancing system that is designed specifically for a particular class of peer-to-peer traffic},
   3683         www_section = {ad-hoc networks, anonymity, P2P, privacy},
   3684         doi = {10.1145/1461464.1461465},
   3685         url = {http://portal.acm.org/citation.cfm?id=1461465},
   3686         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bauer-alpaca2008.pdf},
   3687         author = {Kevin Bauer and Damon McCoy and Dirk Grunwald and Douglas Sicker}
   3688 }
   3689 @article {Levin:2008:BAA:1402946.1402987,
   3690         title = {BitTorrent is an Auction: Analyzing and Improving BitTorrent's Incentives},
   3691         journal = {SIGCOMM Computer Communication Review},
   3692         volume = {38},
   3693         year = {2008},
   3694         month = aug,
   3695         pages = {243--254},
   3696         publisher = {ACM},
   3697         address = {New York, NY, USA},
   3698         abstract = {Incentives play a crucial role in BitTorrent, motivating users to upload to others to achieve fast download times for all peers. Though long believed to be robust to strategic manipulation, recent work has empirically shown that BitTorrent does not provide its users incentive to follow the protocol. We propose an auction-based model to study and improve upon BitTorrent's incentives. The insight behind our model is that BitTorrent uses, not tit-for-tat as widely believed, but an auction to decide which peers to serve. Our model not only captures known, performance-improving strategies, it shapes our thinking toward new, effective strategies. For example, our analysis demonstrates, counter-intuitively, that BitTorrent peers have incentive to intelligently under-report what pieces of the file they have to their neighbors. We implement and evaluate a modification to BitTorrent in which peers reward one another with proportional shares of bandwidth. Within our game-theoretic model, we prove that a proportional-share client is strategy-proof. With experiments on PlanetLab, a local cluster, and live downloads, we show that a proportional-share unchoker yields faster downloads against BitTorrent and BitTyrant clients, and that under-reporting pieces yields prolonged neighbor interest},
   3699         www_section = {auctions, BitTorrent, proportional share, tit-for-tat},
   3700         issn = {0146-4833},
   3701         doi = {http://doi.acm.org/10.1145/1402946.1402987},
   3702         url = {http://doi.acm.org/10.1145/1402946.1402987},
   3703         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20BitTorrent\%20is\%20an\%20Auction.pdf},
   3704         author = {Levin, Dave and LaCurts, Katrina and Spring, Neil and Bobby Bhattacharjee}
   3705 }
   3706 @conference {bootstrap2008gauthierdickey,
   3707         title = {Bootstrapping of Peer-to-Peer Networks},
   3708         booktitle = {Proceedings of DAS-P2P},
   3709         year = {2008},
   3710         month = {August},
   3711         publisher = {IEEE},
   3712         organization = {IEEE},
   3713         address = {Turku, Finland},
   3714         abstract = {In this paper, we present the first heuristic for fully distributed bootstrapping of peer-to-peer networks. Our heuristic generates a stream of promising IP addresses to be probed as entry points. This stream is generated using statistical profiles using the IP ranges of start-of-authorities (SOAs) in the domain name system (DNS). We
   3715 present experimental results demonstrating that with this approach it is efficient and practical to bootstrap Gnutella-sized peer-to-peer networks --- without the need for centralized services or the public exposure of end-user's private IP addresses},
   3716         www_section = {bootstrapping, DNS, installation, P2P},
   3717         url = {http://grothoff.org/christian/bootstrap.pdf},
   3718         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bootstrap.pdf},
   3719         author = {Chris GauthierDickey and Christian Grothoff}
   3720 }
   3721 @conference {shimshock-pet2008,
   3722         title = {Breaking and Provably Fixing Minx},
   3723         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   3724         year = {2008},
   3725         month = {July},
   3726         pages = {99--114},
   3727         publisher = {Springer},
   3728         organization = {Springer},
   3729         address = {Leuven, Belgium},
   3730         abstract = {In 2004, Danezis and Laurie proposed Minx, an encryption protocol and packet format for relay-based anonymity schemes, such as mix networks and onion routing, with simplicity as a primary design goal. Danezis and Laurie argued informally about the security properties of Minx but left open the problem of proving its security. In this paper, we show that there cannot be such a proof by showing that an active global adversary can decrypt Minx messages in polynomial time. To mitigate this attack, we also prove secure a very simple modification of the Minx protocol},
   3731         www_section = {attack, onion routing},
   3732         isbn = {978-3-540-70629-8},
   3733         doi = {10.1007/978-3-540-70630-4_7},
   3734         url = {http://portal.acm.org/citation.cfm?id=1428259.1428266},
   3735         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shimshock-pet2008.pdf},
   3736         author = {Eric Shimshock and Matt Staats and Nicholas J. Hopper}
   3737 }
   3738 @conference {danezis-pet2008,
   3739         title = {Bridging and Fingerprinting: Epistemic Attacks on Route Selection},
   3740         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   3741         year = {2008},
   3742         month = {July},
   3743         pages = {133--150},
   3744         publisher = {Springer},
   3745         organization = {Springer},
   3746         address = {Leuven, Belgium},
   3747         abstract = {Users building routes through an anonymization network must discover the nodes comprising the network. Yet, it is potentially costly, or even infeasible, for everyone to know the entire network. We introduce a novel attack, the route bridging attack, which makes use of what route creators do not know of the network. We also present new discussion and results concerning route fingerprinting attacks, which make use of what route creators do know of the network. We prove analytic bounds for both route fingerprinting and route bridging and describe the impact of these attacks on published anonymity-network designs. We also discuss implications for network scaling and client-server vs. peer-to-peer systems},
   3748         www_section = {anonymity, P2P, route bridging attack},
   3749         isbn = {978-3-540-70629-8},
   3750         doi = {10.1007/978-3-540-70630-4},
   3751         url = {http://www.springerlink.com/content/q2r7g81286026576/},
   3752         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2008.pdf},
   3753         author = {George Danezis and Paul Syverson},
   3754         editor = {Borisov, Nikita and Ian Goldberg}
   3755 }
   3756 @article {1373992,
   3757         title = {Characterizing unstructured overlay topologies in modern P2P file-sharing systems},
   3758         journal = {IEEE/ACM Trans. Netw},
   3759         volume = {16},
   3760         number = {2},
   3761         year = {2008},
   3762         pages = {267--280},
   3763         publisher = {IEEE Press},
   3764         address = {Piscataway, NJ, USA},
   3765         abstract = {In recent years, peer-to-peer (P2P) file-sharing systems have evolved to accommodate growing numbers of participating peers. In particular, new features have changed the properties of the unstructured overlay topologies formed by these peers. Little is known about the characteristics of these topologies and their dynamics in modern file-sharing applications, despite their importance. This paper presents a detailed characterization of P2P overlay topologies and their dynamics, focusing on the modern Gnutella network. We present Cruiser, a fast and accurate P2P crawler, which can capture a complete snapshot of the Gnutella network of more than one million peers in just a few minutes, and show how inaccuracy in snapshots can lead to erroneous conclusions--such as a power-law degree distribution. Leveraging recent overlay snapshots captured with Cruiser, we characterize the graph-related properties of individual overlay snapshots and overlay dynamics across slices of back-to-back snapshots. Our results reveal that while the Gnutella network has dramatically grown and changed in many ways, it still exhibits the clustering and short path lengths of a small world network. Furthermore, its overlay topology is highly resilient to random peer departure and even systematic attacks. More interestingly, overlay dynamics lead to an "onion-like" biased connectivity among peers where each peer is more likely connected to peers with higher uptime. Therefore, long-lived peers form a stable core that ensures reachability among peers despite overlay dynamics},
   3766         www_section = {file-sharing, P2P},
   3767         issn = {1063-6692},
   3768         doi = {10.1109/TNET.2007.900406},
   3769         url = {http://portal.acm.org/citation.cfm?id=1373992$\#$},
   3770         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/stutzbach.pdf},
   3771         author = {Stutzbach, Daniel and Rejaie, Reza and Sen, Subhabrata}
   3772 }
   3773 @conference {torspinISC08,
   3774         title = {Compromising Anonymity Using Packet Spinning},
   3775         booktitle = {Proceedings of the 11th Information Security Conference (ISC 2008)},
   3776         year = {2008},
   3777         month = {September},
   3778         publisher = {Springer-Verlag  Berlin, Heidelberg},
   3779         organization = {Springer-Verlag  Berlin, Heidelberg},
   3780         abstract = {We present a novel attack targeting anonymizing systems. The attack involves placing a malicious relay node inside an anonymizing system and keeping legitimate nodes "busy." We achieve this by creating circular circuits and injecting fraudulent packets, crafted in a way that will make them spin an arbitrary number of times inside our artificial loops. At the same time we inject a small number of malicious nodes that we control into the anonymizing system. By keeping a significant part of the anonymizing system busy spinning useless packets, we increase the probability of having our nodes selected in the creation of legitimate circuits, since we have more free capacity to route requests than the legitimate nodes. This technique may lead to the compromise of the anonymity of people using the system.
   3781 
   3782 To evaluate our novel attack, we used a real-world anonymizing system, TOR. We show that an anonymizing system that is composed of a series of relay nodes which perform cryptographic operations is vulnerable to our packet spinning attack. Our evaluation focuses on determining the cost we can introduce to the legitimate nodes by injecting the fraudulent packets, and the time required for a malicious client to create n-length TOR circuits. Furthermore we prove that routers that are involved in packet spinning do not have the capacity to process requests for the creation of new circuits and thus users are forced to select our malicious nodes for routing their data streams},
   3783         www_section = {anonymity, attack, Tor},
   3784         isbn = {978-3-540-85884-3},
   3785         doi = {10.1007/978-3-540-85886-7_11},
   3786         url = {http://portal.acm.org/citation.cfm?id=1432478.1432493},
   3787         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/torspinISC08.pdf},
   3788         author = {Vasilis Pappas and Elias Athanasopoulos and Sotiris Ioannidis and Evangelos P. Markatos}
   3789 }
   3790 @conference {MarPi08,
   3791         title = {A Concept of an Anonymous Direct P2P Distribution Overlay System},
   3792         booktitle = {Proceedings of IEEE 22nd International Conference on Advanced Information Networking and Applications (AINA)},
   3793         year = {2008},
   3794         month = mar,
   3795         pages = {590--597},
   3796         publisher = {IEEE Computer Society Press},
   3797         organization = {IEEE Computer Society Press},
   3798         address = {Gino-wan, Okinawa, Japan},
   3799         abstract = {The paper introduces a peer-to-peer system called P2PRIV (peer-to-peer direct and anonymous distribution overlay). Basic novel features of P2PRIV are: (i) a peer-to-peer parallel content exchange architecture, and (ii) separation of the anonymization process from the transport function. These features allow a considerable saving of service time while preserving high degree of anonymity. In the paper we evaluate anonymity measures of P2PRIV (using a normalized entropy measurement model) as well as its traffic measures (including service time and network dynamics), and compare anonymity and traffic performance of P2PRIV with a well known system called CROWDS},
   3800         www_section = {communication system security, privacy},
   3801         isbn = {978-0-7695-3095-6},
   3802         doi = {10.1109/AINA.2008.117},
   3803         url = {http://portal.acm.org/citation.cfm?id=1395079.1395235},
   3804         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MarPi08.pdf},
   3805         author = {Igor Margasinski and Michal Pioro}
   3806 }
   3807 @conference {2008_2,
   3808         title = {Consistency Management for Peer-to-Peer-based Massively Multiuser Virtual Environments},
   3809         booktitle = {Proc. 1st Int. Workshop on Massively Multiuser Virtual Environments (MMVE'08)},
   3810         year = {2008},
   3811         url = {http://www.spovnet.de/files/publications/MMVEConsistency.pdf/view},
   3812         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MMVEConsistency.pdf},
   3813         author = {Gregor Schiele and Richard S{\"u}selbeck and Arno Wacker and Triebel, Tonio and Christian Becker}
   3814 }
   3815 @mastersthesis {2008_3,
   3816         title = {The Decentralized File System Igor-FS as an Application for Overlay-Networks},
   3817         volume = {Engineering},
   3818         year = {2008},
   3819         month = feb,
   3820         pages = {0--193},
   3821         school = {Universit{\"a}t Fridericiana (TH) },
   3822         type = {Doctoral},
   3823         address = {Karlsruhe, Germany},
   3824         abstract = {Working in distributed systems is part of the information society. More and more people and organizations work with growing data volumes.
   3825 Often, part of the problem is to access large files in a shared way. Until now, there are two often-used approaches to allow this kind of access. Either the files are transferred via FTP, e-mail or a similar medium before the access happens, or a centralized server provides file services. The first alternative has the disadvantage that the entire file has to be transferred before the first access can be successful. If only small parts of the file have been changed compared to a previous version, the entire file has to be transferred anyway. The centralized approach has disadvantages regarding scalability and reliability. In both approaches authorization and authentication can be difficult in case users are separated by untrusted network segments},
   3826         author = {unknown},
   3827         url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000009668},
   3828         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kutzner\%20-\%20The\%20descentralized\%20file\%20system\%20Igor-FS\%20as\%20an\%20application_0.pdf}
   3829 }
   3830 @article { vrancx:decentralized,
   3831         title = {Decentralized Learning in Markov Games},
   3832         journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part B},
   3833         volume = {38},
   3834         year = {2008},
   3835         month = aug,
   3836         pages = {976--981},
   3837         abstract = {Learning automata (LA) were recently shown to be valuable tools for designing multiagent reinforcement learning algorithms. One of the principal contributions of the LA theory is that a set of decentralized independent LA is able to control a finite Markov chain with unknown transition probabilities and rewards. In this paper, we propose to extend this algorithm to Markov games-a straightforward extension of single-agent Markov decision problems to distributed multiagent decision problems. We show that under the same ergodic assumptions of the original theorem, the extended algorithm will converge to a pure equilibrium point between agent policies},
   3838         www_section = {algorithms, decentralized learning, LA, learning automata},
   3839         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Transactions\%20on\%20Systems\%20-\%20Descentralized\%20learning.pdf},
   3840         author = {Peter Vrancx and Katja Verbeeck and Ann Now{\'e}}
   3841 }
   3842 @conference {ccs2008:wang,
   3843         title = {Dependent Link Padding Algorithms for Low Latency Anonymity Systems},
   3844         booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications Security (CCS 2008)},
   3845         year = {2008},
   3846         month = {October},
   3847         pages = {323--332},
   3848         publisher = {ACM Press},
   3849         organization = {ACM Press},
   3850         address = {Alexandria, Virginia, USA},
   3851         abstract = {Low latency anonymity systems are susceptive to traffic analysis attacks. In this paper, we propose a dependent link padding scheme to protect anonymity systems from traffic analysis attacks while providing a strict delay bound. The covering traffic generated by our scheme uses the minimum sending rate to provide full anonymity for a given set of flows. The relationship between user anonymity and the minimum covering traffic rate is then studied via analysis and simulation. When user flows are Poisson processes with the same sending rate, the minimum covering traffic rate to provide full anonymity to m users is O(log m). For Pareto traffic, we show that the rate of the covering traffic converges to a constant when the number of flows goes to infinity. Finally, we use real Internet trace files to study the behavior of our algorithm when user flows have different rates},
   3852         www_section = {anonymity service, link padding, traffic analysis},
   3853         isbn = {978-1-59593-810-7},
   3854         doi = {10.1145/1455770.1455812},
   3855         url = {http://portal.acm.org/citation.cfm?id=1455812},
   3856         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dependent\%20Link\%20Padding\%20Algorithms\%20for.pdf},
   3857         author = {Wei Wang and Mehul Motani and Vikram Srinivasan},
   3858         editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang}
   3859 }
   3860 @conference {Dischinger:2008:DBB:1452520.1452523,
   3861         title = {Detecting BitTorrent Blocking},
   3862         booktitle = {IMC'08. Proceedings of the 8th ACM SIGCOMM conference on Internet measurement},
   3863         series = {IMC '08},
   3864         year = {2008},
   3865         month = oct,
   3866         pages = {3--8},
   3867         publisher = {ACM},
   3868         organization = {ACM},
   3869         address = {Vouliagmeni, Greece},
   3870         abstract = {Recently, it has been reported that certain access ISPs are surreptitiously blocking their customers from uploading data using the popular BitTorrent file-sharing protocol. The reports have sparked an intense and wide-ranging policy debate on network neutrality and ISP traffic management practices. However, to date, end users lack access to measurement tools that can detect whether their access ISPs are blocking their BitTorrent traffic. And since ISPs do not voluntarily disclose their traffic management policies, no one knows how widely BitTorrent traffic blocking is deployed in the current Internet. In this paper, we address this problem by designing an easy-to-use tool to detect BitTorrent blocking and by presenting results from a widely used public deployment of the tool},
   3871         www_section = {BitTorrent, blocking, network measurement},
   3872         isbn = {978-1-60558-334-1},
   3873         doi = {http://doi.acm.org/10.1145/1452520.1452523},
   3874         url = {http://doi.acm.org/10.1145/1452520.1452523},
   3875         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2708\%20-\%20Detecting\%20BitTorrent\%20Blocking.pdf},
   3876         author = {Dischinger, Marcel and Mislove, Alan and Haeberlen, Andreas and P. Krishna Gummadi}
   3877 }
   3878 @conference {clog-the-queue,
   3879         title = {Don't Clog the Queue: Circuit Clogging and Mitigation in P2P anonymity schemes},
   3880         booktitle = {Proceedings of Financial Cryptography (FC '08)},
   3881         year = {2008},
   3882         month = jan,
   3883         publisher = {Springer-Verlag  Berlin, Heidelberg},
   3884         organization = {Springer-Verlag  Berlin, Heidelberg},
   3885         abstract = {At Oakland 2005, Murdoch and Danezis described an attack on the Tor anonymity service that recovers the nodes in a Tor circuit, but not the client. We observe that in a peer-to-peer anonymity scheme, the client is part of the circuit and thus the technique can be of greater significance in this setting. We experimentally validate this conclusion by showing that "circuit clogging" can identify client nodes using the MorphMix peer-to-peer anonymity protocol. We also propose and empirically validate the use of the Stochastic Fair Queueing discipline on outgoing connections as an efficient and low-cost mitigation technique},
   3886         www_section = {anonymity, P2P, Tor},
   3887         doi = {10.1007/978-3-540-85230-8_3},
   3888         url = {http://portal.acm.org/citation.cfm?id=1428551},
   3889         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clog-the-queue.pdf},
   3890         author = {Jon McLachlan and Nicholas J. Hopper}
   3891 }
   3892 @article {DBLP:journals/pvldb/Amer-YahiaBLS08,
   3893         title = {Efficient network aware search in collaborative tagging sites},
   3894         journal = {PVLDB'08},
   3895         volume = {1},
   3896         number = {1},
   3897         year = {2008},
   3898         month = {August},
   3899         address = {Auckland, New Zealand},
   3900         url = {https://bibliography.gnunet.org},
   3901         author = {Sihem Amer-Yahia and Michael Benedikt and Laks V. S. Lakshmanan and Julia Stoyanovich}
   3902 }
   3903 @conference {BecchiCrowley2008EfficientRegexEval,
   3904         title = {Efficient regular expression evaluation: theory to practice},
   3905         booktitle = {Proceedings of the 4th ACM/IEEE Symposium on Architectures for Networking and Communications Systems},
   3906         series = {ANCS '08},
   3907         year = {2008},
   3908         pages = {50--59},
   3909         publisher = {ACM},
   3910         organization = {ACM},
   3911         address = {New York, NY, USA},
   3912         isbn = {978-1-60558-346-4},
   3913         doi = {10.1145/1477942.1477950},
   3914         url = {http://doi.acm.org/10.1145/1477942.1477950},
   3915         author = {Becchi, Michela and Crowley, Patrick}
   3916 }
   3917 @article {1373458,
   3918         title = {Efficient routing in intermittently connected mobile networks: the single-copy case},
   3919         journal = {IEEE/ACM Trans. Netw},
   3920         volume = {16},
   3921         number = {1},
   3922         year = {2008},
   3923         pages = {63--76},
   3924         publisher = {IEEE Press},
   3925         address = {Piscataway, NJ, USA},
   3926         abstract = {Intermittently connected mobile networks are wireless networks where most of the time there does not exist a complete path from the source to the destination. There are many real networks that follow this model, for example, wildlife tracking sensor networks, military networks, vehicular ad hoc networks (VANETs), etc. In this context, conventional routing schemes would fail, because they try to establish complete end-to-end paths, before any data is sent.
   3927 
   3928 To deal with such networks researchers have suggested to use flooding-based routing schemes. While flooding-based schemes have a high probability of delivery, they waste a lot of energy and suffer from severe contention which can significantly degrade their performance. With this in mind, we look into a number of "single-copy" routing schemes that use only one copy per message, and hence significantly reduce the resource requirements of flooding-based algorithms. We perform a detailed exploration of the single-copy routing space in order to identify efficient single-copy solutions that (i) can be employed when low resource usage is critical, and (ii) can help improve the design of general routing schemes that use multiple copies. We also propose a theoretical framework that we use to analyze the performance of all single-copy schemes presented, and to derive upper and lower bounds on the delay of any scheme},
   3929         www_section = {mobile Ad-hoc networks, routing},
   3930         issn = {1063-6692},
   3931         doi = {10.1109/TNET.2007.897962},
   3932         url = {http://portal.acm.org/citation.cfm?id=1373458$\#$},
   3933         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.74.8097.pdf},
   3934         author = {Spyropoulos, Thrasyvoulos and Psounis, Konstantinos and Raghavendra, Cauligi S.}
   3935 }
   3936 @conference {CoNext2008,
   3937         title = {EGOIST: Overlay Routing using Selfish Neighbor Selection},
   3938         booktitle = {Proceedings of ACM CoNEXT 2008},
   3939         year = {2008},
   3940         month = {December},
   3941         address = {Madrid, Spain},
   3942         abstract = {A foundational issue underlying many overlay network applications ranging from routing to peer-to-peer file sharing is that of connectivity management, i.e., folding new arrivals into an existing overlay, and re-wiring to cope with changing network conditions. Previous work has considered the problem from two perspectives: devising practical heuristics for specific applications designed to work well in real deployments, and providing abstractions for the underlying problem that are analytically tractable, especially via game-theoretic analysis. In this paper, we unify these two thrusts by using insights gleaned from novel, realistic theoretic models in the design of Egoist -- a distributed overlay routing system that we implemented, deployed, and evaluated on PlanetLab. Using extensive measurements of paths between nodes, we demonstrate that Egoist's neighbor selection primitives significantly outperform existing heuristics on a variety of performance metrics, including delay, available bandwidth, and node utilization. Moreover, we demonstrate that Egoist is competitive with an optimal, but unscalable full-mesh approach, remains highly effective under significant churn, is robust to cheating, and incurs minimal overhead. Finally, we use a multiplayer peer-to-peer game to demonstrate the value of Egoist to end-user applications},
   3943         www_section = {EGOIST, game theory, overlay networks, routing, selfish neighbor selection},
   3944         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoNEXT2008.pdf},
   3945         author = {Georgios Smaragdakis and Vassilis Lekakis and Nikolaos Laoutaris and Azer Bestavros and Byers, John W. and Mema Roussopoulos}
   3946 }
   3947 @booklet {LOCEntropy2008,
   3948         title = {Entropy Bounds for Traffic Confirmation},
   3949         number = {2008/365},
   3950         year = {2008},
   3951         month = {October},
   3952         publisher = {IACR},
   3953         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LOCEntropy2008.pdf},
   3954         author = {Luke O'Connor}
   3955 }
   3956 @book {2008_4,
   3957         title = {Estimating The Size Of Peer-To-Peer Networks Using Lambert's W Function},
   3958         booktitle = {Grid Computing--Achievements and Prospects},
   3959         year = {2008},
   3960         pages = {61--72},
   3961         publisher = {Springer-Verlag},
   3962         organization = {Springer-Verlag},
   3963         address = {New York, NY, USA},
   3964         abstract = {In this work, we address the problem of locally estimating the size of a Peer-to-Peer (P2P) network using local information. We present a novel approach for estimating the size of a peer-to-peer (P2P) network, fitting the sum of new neighbors discovered at each iteration of a breadth-first search (BFS) with a logarithmic function, and then using Lambert's W function to solve a root of a ln(n) + b - n = 0, where n is the network size. With rather little computation, we reach an estimation error of at most 10 percent, only allowing the BFS to iterate to the third level},
   3965         www_section = {distributed computing, lambert w function, network size estimation, peer-to-peer networking},
   3966         isbn = {978-0-387-09456-4},
   3967         issn = {978-0-387-09456-4},
   3968         url = {http://eprints.adm.unipi.it/649/},
   3969         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Grid\%20Computing\%20-\%20Estimating\%20The\%20Size\%20Of\%20Peer-To-Peer\%20Networks.pdf},
   3970         author = {Javier Bustos-Jim{\'e}nez and Nicol{\'a}s Bersano and Satu Elisa Schaeffer and Jos{\'e} Miguel Piquer and Alexandru Iosup and Augusto Ciuffoletti}
   3971 }
   3972 @conference {Junges:2008:EPD:1402298.1402308,
   3973         title = {Evaluating the performance of DCOP algorithms in a real world, dynamic problem},
   3974         booktitle = {AAMAS'08--Proceedings of the 7th international joint conference on Autonomous agents and multiagent systems},
   3975         series = {AAMAS '08},
   3976         year = {2008},
   3977         month = may,
   3978         pages = {599--606},
   3979         publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
   3980         organization = {International Foundation for Autonomous Agents and Multiagent Systems},
   3981         address = {Estoril, Portugal},
   3982         abstract = {Complete algorithms have been proposed to solve problems modelled as distributed constraint optimization (DCOP). However, there are only few attempts to address real world scenarios using this formalism, mainly because of the complexity associated with those algorithms. In the present work we compare three complete algorithms for DCOP, aiming at studying how they perform in complex and dynamic scenarios of increasing sizes. In order to assess their performance we measure not only standard quantities such as number of cycles to arrive to a solution, size and quantity of exchanged messages, but also computing time and quality of the solution which is related to the particular domain we use. This study can shed light in the issues of how the algorithms perform when applied to problems other than those reported in the literature (graph coloring, meeting scheduling, and distributed sensor network)},
   3983         www_section = {coordination, DCOP, distributed constraint optimization, traffic control},
   3984         isbn = {978-0-9817381-1-6},
   3985         url = {http://dl.acm.org/citation.cfm?id=1402298.1402308},
   3986         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS08\%20-\%20DCOP\%20algorithms\%20in\%20a\%20real\%20world\%20problem.pdf},
   3987         author = {Junges, Robert and Bazzan, Ana L. C.}
   3988 }
   3989 @conference {DBLP:conf/icc/ChenCLNC08,
   3990         title = {Experimental Analysis of Super-Seeding in BitTorrent},
   3991         booktitle = {ICC'08--Proceedings of the 2008 IEEE International Conference on Communications},
   3992         year = {2008},
   3993         month = may,
   3994         pages = {65--69},
   3995         publisher = {IEEE Computer Society},
   3996         organization = {IEEE Computer Society},
   3997         address = {Beijing, China},
   3998         abstract = {With the popularity of BitTorrent, improving its performance has been an active research area. Super-seeding, a special upload policy for initial seeds, improves the efficiency in producing multiple seeds and reduces the uploading cost of the initial seeders. However, the overall benefit of super-seeding remains a question. In this paper, we conduct an experimental study of the performance of the super-seeding scheme of BitTornado. We attempt to answer the following questions: whether and how much super-seeding saves uploading cost, whether the download time of all peers is decreased by super-seeding, and in which scenario super-seeding performs worse. With varying seed bandwidth and peer behavior, we analyze the overall download time and upload cost of the super-seeding scheme during random period tests over 250 widely distributed PlanetLab nodes. The results show that the benefits of super-seeding depend highly on the upload bandwidth of the initial seeds and the behavior of individual peers. Our work not only provides a reference for the potential adoption of super-seeding in BitTorrent, but also much insight into the balance of enhancing Quality of Experience (QoE) and saving cost for a large-scale BitTorrent-like P2P commercial application},
   3999         www_section = {BitTorrent, super-seeding},
   4000         isbn = {978-1-4244-2075-9},
   4001         doi = {http://dx.doi.org/10.1109/ICC.2008.20},
   4002         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICC\%2708\%20-\%20Super-Seeding\%20in\%20BitTorrent.PDF},
   4003         author = {Zhijia Chen and Yang Chen and Chuang Lin and Vaibhav Nivargi and Pei Cao}
   4004 }
   4005 @conference {Ben-David:2008:FSS:1455770.1455804,
   4006         title = {FairplayMP: a system for secure multi-party computation},
   4007         booktitle = {CCS'08--Proceedings of the 15th ACM conference on Computer and communications security},
   4008         series = {CCS '08},
   4009         year = {2008},
   4010         month = oct,
   4011         pages = {257--266},
   4012         publisher = {ACM},
   4013         organization = {ACM},
   4014         address = {Alexandria, VA, USA},
   4015         abstract = {We present FairplayMP (for "Fairplay Multi-Party"), a system for secure multi-party computation. Secure computation is one of the great achievements of modern cryptography, enabling a set of untrusting parties to compute any function of their private inputs while revealing nothing but the result of the function. In a sense, FairplayMP lets the parties run a joint computation that emulates a trusted party which receives the inputs from the parties, computes the function, and privately informs the parties of their outputs. FairplayMP operates by receiving a high-level language description of a function and a configuration file describing the participating parties. The system compiles the function into a description as a Boolean circuit, and performs a distributed evaluation of the circuit while revealing nothing else. FairplayMP supplements the Fairplay system [16], which supported secure computation between two parties. The underlying protocol of FairplayMP is the Beaver-Micali-Rogaway (BMR) protocol which runs in a constant number of communication rounds (eight rounds in our implementation). We modified the BMR protocol in a novel way and considerably improved its performance by using the Ben-Or-Goldwasser-Wigderson (BGW) protocol for the purpose of constructing gate tables. We chose to use this protocol since we believe that the number of communication rounds is a major factor in the overall performance of the protocol. We conducted different experiments which measure the effect of different parameters on the performance of the system and demonstrate its scalability. (We can now tell, for example, that running a second-price auction between four bidders, using five computation players, takes about 8 seconds.)},
   4016         www_section = {cryptography, secure multi-party computation, SMC},
   4017         isbn = {978-1-59593-810-7},
   4018         doi = {10.1145/1455770.1455804},
   4019         url = {http://doi.acm.org/10.1145/1455770.1455804},
   4020         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20FairplayMP.pdf},
   4021         author = {Ben-David, Assaf and Nisan, Noam and Pinkas, Benny}
   4022 }
   4023 @article {2008_5,
   4024         title = {On the False-positive Rate of Bloom Filters},
   4025         journal = {Inf. Process. Lett},
   4026         volume = {108},
   4027         year = {2008},
   4028         pages = {210--213},
   4029         abstract = {Bloom filters are a randomized data structure for membership queries dating back to 1970. Bloom filters sometimes give erroneous answers to queries, called false positives. Bloom analyzed the probability of such erroneous answers, called the false-positive rate, and Bloom's analysis has appeared in many publications throughout the years. We show that Bloom's analysis is incorrect and give a correct analysis},
   4030         www_section = {Analysis of algorithms, data structures},
   4031         issn = {0020-0190},
   4032         doi = {10.1016/j.ipl.2008.05.018},
   4033         url = {http://dx.doi.org/10.1016/j.ipl.2008.05.018},
   4034         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FalsepositiverateBloomFilter2008Bose.pdf},
   4035         author = {Bose, Prosenjit and Guo, Hua and Kranakis, Evangelos and Maheshwari, Anil and Morin, Pat and Morrison, Jason and Smid, Michiel and Tang, Yihui}
   4036 }
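% Illustrative note (not from the cited article): Bloom's classical analysis
% gives a false-positive rate of (1 - (1 - 1/m)^{kn})^k for a filter of m bits,
% n inserted keys and k hash functions, usually approximated by (1 - e^{-kn/m})^k;
% the article above shows that this classical analysis is not exact. A small
% Python sketch of the two classical formulas (parameter values are arbitrary):
%
%   from math import exp
%
%   def bloom_fp_classical(m, n, k):
%       return (1.0 - (1.0 - 1.0 / m) ** (k * n)) ** k
%
%   def bloom_fp_approx(m, n, k):
%       return (1.0 - exp(-k * n / m)) ** k
%
%   print(bloom_fp_classical(2**20, 100_000, 5), bloom_fp_approx(2**20, 100_000, 5))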
   4037 @article {Wang:2008:GAI:1412757.1412971,
   4038         title = {A game-theoretic analysis of the implications of overlay network traffic on ISP peering},
   4039         journal = {Computer Networks},
   4040         volume = {52},
   4041         year = {2008},
   4042         month = oct,
   4043         pages = {2961--2974},
   4044         publisher = {Elsevier North-Holland, Inc},
   4045         address = {New York, NY, USA},
   4046         abstract = {Inter-ISP traffic flow determines the settlement between ISPs and affects the perceived performance of ISP services. In today's Internet, the inter-ISP traffic flow patterns are controlled not only by ISPs' policy-based routing configuration and traffic engineering, but also by application layer routing. The goal of this paper is to study the economic implications of this shift in Internet traffic control assuming rational ISPs and subscribers. For this purpose, we build a general traffic model that predicts traffic patterns based on subscriber distribution and abstract traffic controls such as caching functions and performance sensitivity functions. We also build a game-theoretic model of subscribers picking ISPs, and ISPs making provisioning and peering decisions. In particular, we apply this to a local market where two ISPs compete for market share of subscribers under two traffic patterns: ''Web'' and ''P2P overlay'', that typifies the transition the current Internet is going through. Our methodology can be used to quantitatively demonstrate that (1) while economy of scale is the predominant property of the competitive ISP market, P2P traffic may introduce unfair distribution of peering benefit (i.e. free-riding); (2) the large ISP can restore more fairness by reducing its private capacity (bandwidth throttling), which has the drawback of hurting business growth; and (3) ISPs can reduce the level of peering (e.g. by reducing peering bandwidth) to restore more fairness, but this has the side-effect of also reducing the ISPs' collective bargaining power towards subscribers},
   4047         www_section = {game theory, isp, Network management, Peering, Traffic model},
   4048         issn = {1389-1286},
   4049         doi = {10.1016/j.comnet.2008.06.014},
   4050         url = {http://dl.acm.org/citation.cfm?id=1412757.1412971},
   4051         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Computer\%20Networks\%20-\%20Wang\%2C\%20Chiu\%20\%26\%20Lui\%20-\%20Overlay\%20network\%20traffic\%20on\%20ISP\%20peering.pdf},
   4052         author = {Wang, Jessie Hui and Chiu, Dah Ming and Lui, John C. S.}
   4053 }
   4054 @conference {saballus08gaos,
   4055         title = {Global Accessible Objects (GAOs) in the Ambicomp Distributed Java Virtual Machine},
   4056         booktitle = {Proceedings of the Second International Conference on Sensor Technologies and Applications (SENSORCOMM 2008)},
   4057         year = {2008},
   4058         publisher = {IEEE Computer Society},
   4059         organization = {IEEE Computer Society},
   4060         address = {Cap Esterel, France},
   4061         abstract = {As networked embedded sensors and actuators become more and more widespread, software developers face the difficulty of creating applications that run distributed on these nodes: Typically, these nodes are heterogeneous, resource-limited, and there is no centralized control. The Ambicomp project tackles this problem. Its goal is to provide a distributed Java Virtual Machine (VM) that runs on the bare sensor node hardware. This VM creates a single system illusion across several nodes. Objects and threads can migrate freely between these nodes. In this paper, we address the problem of globally accessible objects. We describe how scalable source routing, a DHT-inspired routing protocol, can be used to allow access to objects regardless of their respective physical location and without any centralized component},
   4062         www_section = {distributed hash table},
   4063         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   4064         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saballus08gaos.pdf},
   4065         author = {Bjoern Saballus and Johannes Eickhold and Thomas Fuhrmann}
   4066 }
   4067 @booklet { back-hash,
   4068         title = {Hashcash--a denial of service counter-measure},
   4069         year = {2002},
   4070         abstract = {Hashcash was originally proposed as a mechanism to throttle systematic abuse of un-metered internet resources such as email, and anonymous remailers in May 1997. Five years on, this paper captures in one place the various applications, improvements suggested and related subsequent publications, and describes initial experience from experiments using hashcash. The hashcash CPU cost-function computes a token which can be used as a proof-of-work. Interactive and non-interactive variants of cost-functions can be constructed which can be used in situations where the server can issue a challenge (connection oriented interactive protocol), and where it cannot (where the communication is store-and-forward, or packet oriented) respectively},
   4071         url = {http://citeseer.ist.psu.edu/back02hashcash.html},
   4072         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hashcash.pdf},
   4073         author = {Adam Back}
   4074 }
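% Illustrative sketch (not from the cited paper): a hashcash-style cost-function
% asks the sender to find a stamp whose hash starts with a given number of zero
% bits, which is expensive to mint but takes a single hash to verify. The stamp
% layout below is a simplified stand-in, not the actual hashcash v1 format:
%
%   import hashlib, itertools
%
%   def mint(resource, bits=20):
%       for counter in itertools.count():
%           stamp = f"{resource}:{counter}"
%           digest = hashlib.sha1(stamp.encode()).digest()
%           if int.from_bytes(digest, "big") >> (160 - bits) == 0:
%               return stamp  # verifier recomputes one SHA-1 to check the leading zero bits
%
%   print(mint("example@example.com"))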
   4075 @conference {2008_6,
   4076         title = {Higher Confidence in Event Correlation Using Uncertainty Restrictions},
   4077         booktitle = {28th International Conference on Distributed Computing Systems Workshops},
   4078         year = {2008},
   4079         abstract = {Distributed cooperative systems that use event notification for communication can benefit from event correlation within the notification network. In the presence of uncertain data, however, correlation results easily become unreliable. The handling of uncertainty is therefore an important challenge for event correlation in distributed event notification systems. In this paper, we present a generic correlation model that is aware of uncertainty. We propose uncertainty constraints that event correlation can take into account and show how they can lead to higher confidence in the correlation result. We demonstrate that the application of this model allows one to obtain a qualitative description of event correlation},
   4080         url = {http://www.citeulike.org/user/nmsx/article/4505416},
   4081         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koch08confidence.pdf},
   4082         author = {Gerald G. Koch and Boris Koldehofe and Kurt Rothermel}
   4083 }
   4084 @conference {sassaman-pet2008,
   4085         title = {How to Bypass Two Anonymity Revocation Systems},
   4086         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4087         year = {2008},
   4088         month = {July},
   4089         pages = {187--201},
   4090         publisher = {Springer},
   4091         organization = {Springer},
   4092         address = {Leuven, Belgium},
   4093         abstract = {In recent years, there have been several proposals for anonymous communication systems that provide intentional weaknesses to allow anonymity to be circumvented in special cases. These anonymity revocation schemes attempt to retain the properties of strong anonymity systems while granting a special class of people the ability to selectively break through their protections. We evaluate the two dominant classes of anonymity revocation systems, and identify fundamental flaws in their architecture, leading to a failure to ensure proper anonymity revocation, as well as introducing additional weaknesses for users not targeted for anonymity revocation},
   4094         isbn = {978-3-540-70629-8},
   4095         doi = {10.1007/978-3-540-70630-4},
   4096         url = {http://www.springerlink.com/content/179453h161722821/},
   4097         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sassaman-pet2008.pdf},
   4098         author = {George Danezis and Len Sassaman},
   4099         editor = {Borisov, Nikita and Ian Goldberg}
   4100 }
   4101 @conference {Boldyreva:2008:IEE:1455770.1455823,
   4102         title = {Identity-based encryption with efficient revocation},
   4103         booktitle = {CCS'08--Proceedings of the 15th ACM Conference on Computer and Communications Security},
   4104         series = {CCS '08},
   4105         year = {2008},
   4106         month = oct,
   4107         pages = {417--426},
   4108         publisher = {ACM},
   4109         organization = {ACM},
   4110         address = {Alexandria, VA, USA},
   4111         abstract = {Identity-based encryption (IBE) is an exciting alternative to public-key encryption, as IBE eliminates the need for a Public Key Infrastructure (PKI). The senders using an IBE do not need to look up the public keys and the corresponding certificates of the receivers, the identities (e.g. emails or IP addresses) of the latter are sufficient to encrypt. Any setting, PKI- or identity-based, must provide a means to revoke users from the system. Efficient revocation is a well-studied problem in the traditional PKI setting. However in the setting of IBE, there has been little work on studying the revocation mechanisms. The most practical solution requires the senders to also use time periods when encrypting, and all the receivers (regardless of whether their keys have been compromised or not) to update their private keys regularly by contacting the trusted authority. We note that this solution does not scale well -- as the number of users increases, the work on key updates becomes a bottleneck. We propose an IBE scheme that significantly improves key-update efficiency on the side of the trusted party (from linear to logarithmic in the number of users), while staying efficient for the users. Our scheme builds on the ideas of the Fuzzy IBE primitive and binary tree data structure, and is provably secure},
   4112         www_section = {IBE, identity-based encryption, provable security, revocation},
   4113         isbn = {978-1-59593-810-7},
   4114         doi = {http://doi.acm.org/10.1145/1455770.1455823},
   4115         url = {http://doi.acm.org/10.1145/1455770.1455823},
   4116         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20Identity-based\%20encryption\%20with\%20efficient\%20revocation.pdf},
   4117         author = {Boldyreva, Alexandra and Goyal, Vipul and Kumar, Virendra}
   4118 }
   4119 @conference {DBLP:conf/p2p/AmannEHF08,
   4120         title = {IgorFs: A Distributed P2P File System},
   4121         booktitle = {Peer-to-Peer Computing},
   4122         year = {2008},
   4123         pages = {77--78},
   4124         abstract = {IgorFs is a distributed, decentralized peer-to-peer (P2P) file system that is completely transparent to the user. It is built on top of the Igor peer-to-peer overlay network, which is similar to Chord, but provides additional features like service orientation or proximity neighbor and route selection. IgorFs offers an efficient means to publish data files that are subject to frequent but minor modifications. In our demonstration we show two use cases for IgorFs: the first example is (static) software-distribution and the second example is (dynamic) file distribution},
   4125         www_section = {distributed storage, P2P},
   4126         doi = {10.1109/P2P.2008.19},
   4127         url = {http://www.pubzone.org/dblp/conf/p2p/AmannEHF08},
   4128         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/amann08igorfs.pdf},
   4129         author = {Bernhard Amann and Benedikt Elser and Yaser Houri and Thomas Fuhrmann}
   4130 }
   4131 @conference {diaz-pet2008,
   4132         title = {On the Impact of Social Network Profiling on Anonymity},
   4133         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4134         year = {2008},
   4135         month = {July},
   4136         pages = {44--62},
   4137         publisher = {Springer},
   4138         organization = {Springer},
   4139         address = {Leuven, Belgium},
   4140         abstract = {This paper studies anonymity in a setting where individuals who communicate with each other over an anonymous channel are also members of a social network. In this setting the social network graph is known to the attacker. We propose a Bayesian method to combine multiple available sources of information and obtain an overall measure of anonymity. We study the effects of network size and find that in this case anonymity degrades when the network grows. We also consider adversaries with incomplete or erroneous information; characterize their knowledge of the social network by its quantity, quality and depth; and discuss the implications of these properties for anonymity},
   4141         www_section = {anonymity, attack},
   4142         isbn = {978-3-540-70629-8},
   4143         doi = {10.1007/978-3-540-70630-4_4},
   4144         url = {http://portal.acm.org/citation.cfm?id=1428263},
   4145         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/diaz-pet2008.pdf},
   4146         author = {Claudia Diaz and Carmela Troncoso and Andrei Serjantov},
   4147         editor = {Borisov, Nikita and Ian Goldberg}
   4148 }
   4149 @conference {improved-clockskew,
   4150         title = {An Improved Clock-skew Measurement Technique for Revealing Hidden Services},
   4151         booktitle = {Proceedings of the 17th USENIX Security Symposium},
   4152         year = {2008},
   4153         month = {July},
   4154         publisher = {USENIX Association, Berkeley, CA, USA},
   4155         organization = {USENIX Association, Berkeley, CA, USA},
   4156         address = {San Jose, CA, US},
   4157         abstract = {The Tor anonymisation network allows services, such as web servers, to be operated under a pseudonym. In previous work Murdoch described a novel attack to reveal such hidden services by correlating clock skew changes with times of increased load, and hence temperature. Clock skew measurement suffers from two main sources of noise: network jitter and timestamp quantisation error. Depending on the target's clock frequency the quantisation noise can be orders of magnitude larger than the noise caused by typical network jitter. Quantisation noise limits the previous attacks to situations where a high frequency clock is available. It has been hypothesised that by synchronising measurements to the clock ticks, quantisation noise can be reduced. We show how such synchronisation can be achieved and maintained, despite network jitter. Our experiments show that synchronised sampling significantly reduces the quantisation error and the remaining noise only depends on the network jitter (but not clock frequency). Our improved skew estimates are up to two orders of magnitude more accurate for low-resolution timestamps and up to one order of magnitude more accurate for high-resolution timestamps, when compared to previous random sampling techniques. The improved accuracy not only allows previous attacks to be executed faster and with less network traffic but also opens the door to previously infeasible attacks on low-resolution clocks, including measuring skew of a HTTP server over the anonymous channel},
   4158         www_section = {anonymity, pseudonym, Tor},
   4159         url = {http://portal.acm.org/citation.cfm?id=1496726},
   4160         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/improved-clockskew.pdf},
   4161         author = {Sebastian Zander and Steven J. Murdoch}
   4162 }
   4163 @mastersthesis {reardon-thesis,
   4164         title = {Improving Tor using a TCP-over-DTLS Tunnel},
   4165         year = {2008},
   4166         month = {September},
   4167         school = {University of Waterloo},
   4168         type = {masters},
   4169         abstract = {The Tor network gives anonymity to Internet users by relaying their traffic through the world over a variety of routers. This incurs latency, and this thesis first explores where this latency occurs. Experiments discount the latency induced by routing
   4170 traffic and computational latency to determine there is a substantial component that is caused by delay in the communication path. We determine that congestion control is causing the delay.
   4171 Tor multiplexes multiple streams of data over a single TCP connection. This is not a wise use of TCP, and as such results in the unfair application of congestion control. We illustrate an example of this occurrence on a Tor node on the live network and also illustrate how packet dropping and reordering cause interference between the multiplexed streams.
   4172 Our solution is to use a TCP-over-DTLS (Datagram Transport Layer Security) transport between routers, and give each stream of data its own TCP connection. We give our design for our proposal, and details about its implementation. Finally, we perform experiments on our implemented version to illustrate that our proposal has in fact resolved the multiplexing issues discovered in our system performance analysis. The future work gives a number of steps towards optimizing and improving our work, along with some tangential ideas that were discovered during research.
   4173 Additionally, the open-source software projects latency proxy and libspe, which were designed for our purposes but programmed for universal applicability, are discussed},
   4174         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reardon-thesis.pdf},
   4175         author = {Reardon, Joel}
   4176 }
   4177 @conference {2008_7,
   4178         title = {Improving User and ISP Experience through ISP-aided P2P Locality},
   4179         booktitle = {GI'08. Proceedings of the 11th IEEE Global Internet Symposium 2008},
   4180         year = {2008},
   4181         month = apr,
   4182         publisher = {IEEE Computer Society},
   4183         organization = {IEEE Computer Society},
   4184         address = {Phoenix, AZ},
   4185         abstract = {Despite recent improvements, P2P systems are still plagued by fundamental issues such as overlay/underlay topological and routing mismatch, which affects their performance and causes traffic strains on the ISPs. In this work, we aim to improve overall system performance for ISPs as well as P2P systems by means of traffic localization through improved collaboration between ISPs and P2P systems. More specifically, we study the effects of different ISP/P2P topologies as well as a broad range of influential user behavior characteristics, namely content availability, churn, and query patterns, on end-user and ISP experience. We show that ISP-aided P2P locality benefits both P2P users and ISPs, measured in terms of improved content download times, increased network locality of query responses and desired content, and overall reduction in P2P traffic},
   4186         www_section = {isp, P2P},
   4187         isbn = {978-1-4244-2219-7 },
   4188         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/isp-aidedp2p.PDF},
   4189         author = {Vinay Aggarwal and Obi Akonjang and Feldmann, Anja}
   4190 }
   4191 @conference {ccs2008:mittal,
   4192         title = {Information Leaks in Structured Peer-to-peer Anonymous Communication Systems},
   4193         booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications Security (CCS 2008)},
   4194         year = {2008},
   4195         month = {October},
   4196         pages = {267--278},
   4197         publisher = {ACM Press},
   4198         organization = {ACM Press},
   4199         address = {Alexandria, Virginia, USA},
   4200         abstract = {We analyze information leaks in the lookup mechanisms of structured peer-to-peer anonymous communication systems and how these leaks can be used to compromise anonymity. We show that the techniques that are used to combat active attacks on the lookup mechanism dramatically increase information leaks and increase the efficacy of passive attacks. Thus there is a trade-off between robustness to active and passive attacks.
   4201 
   4202 We study this trade-off in two P2P anonymous systems, Salsa and AP3. In both cases, we find that, by combining both passive and active attacks, anonymity can be compromised much more effectively than previously thought, rendering these systems insecure for most proposed uses. Our results hold even if security parameters are changed or other improvements to the systems are considered. Our study therefore motivates the search for new approaches to P2P anonymous communication},
   4203         www_section = {anonymity, attack, information leaks, P2P},
   4204         isbn = {978-1-59593-810-7},
   4205         doi = {10.1145/1455770.1455805},
   4206         url = {http://portal.acm.org/citation.cfm?id=1455805},
   4207         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs2008-mittal.pdf},
   4208         author = {Prateek Mittal and Borisov, Nikita},
   4209         editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang}
   4210 }
   4211 @article {Chen:2008:IRS:1331483.1331515,
   4212         title = {Insight into redundancy schemes in DHTs},
   4213         journal = {Journal of Supercomputing},
   4214         volume = {43},
   4215         year = {2008},
   4216         month = feb,
   4217         pages = {183--198},
   4218         publisher = {Kluwer Academic Publishers},
   4219         address = {Hingham, MA, USA},
   4220         abstract = {In order to provide high data availability in peer-to-peer (P2P) DHTs, proper data redundancy schemes are required. This paper compares two popular schemes: replication and erasure coding. Unlike previous comparisons, we take user download behavior into account. Furthermore, we propose a hybrid redundancy scheme, which shares user downloaded files for subsequent accesses and utilizes erasure coding to adjust file availability. Comparison experiments of the three schemes show that replication saves more bandwidth than erasure coding, although it requires more storage space, when average node availability is higher than 47\%; moreover, our hybrid scheme saves more maintenance bandwidth with an acceptable redundancy factor},
   4221         www_section = {distributed hash table, erasure coding, peer-to-peer networking, redundancy, Replication},
   4222         issn = {0920-8542},
   4223         doi = {10.1007/s11227-007-0126-4},
   4224         url = {http://dl.acm.org/citation.cfm?id=1331483.1331515},
   4225         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Supercomputing\%20-\%20Insight\%20into\%20redundancy\%20schemes\%20in\%20DHTs.pdf},
   4226         author = {Chen, Guihai and Qiu, Tongqing and Wu, Fan}
   4227 }
   4228 @conference {DBLP:conf/usenix/HiblerRSDGSWL08,
   4229         title = {Large-scale Virtualization in the Emulab Network Testbed},
   4230         booktitle = {USENIX Annual Technical Conference},
   4231         year = {2008},
   4232         pages = {113--128},
   4233         www_section = {emulab, emulation, testbed, virtualization},
   4234         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/emulab.pdf},
   4235         author = {Mike Hibler and Robert Ricci and Leigh Stoller and Jonathon Duerig and Shashi Guruprasad and Tim Stack and Kirk Webb and Jay Lepreau}
   4236 }
   4237 @article {nussbaum2008p2plab,
   4238         title = {Lightweight emulation to study peer-to-peer systems},
   4239         journal = {Concurrency and Computation: Practice and Experience},
   4240         volume = {20},
   4241         number = {6},
   4242         year = {2008},
   4243         pages = {735--749},
   4244         publisher = {John Wiley \& Sons, Ltd},
   4245         www_section = {BitTorrent, emulation, evaluation, network, peer-to-peer, virtualization},
   4246         issn = {1532-0634},
   4247         doi = {10.1002/cpe.1242},
   4248         url = {http://dx.doi.org/10.1002/cpe.1242},
   4249         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p2plab-cpe.pdf},
   4250         author = {Nussbaum, Lucas and Richard, Olivier}
   4251 }
   4252 @article {1390683,
   4253         title = {Linear-Time Computation of Similarity Measures for Sequential Data},
   4254         journal = {J. Mach. Learn. Res},
   4255         volume = {9},
   4256         year = {2008},
   4257         pages = {23--48},
   4258         publisher = {JMLR.org},
   4259         abstract = {Efficient and expressive comparison of sequences is an essential procedure for learning with sequential data. In this article we propose a generic framework for computation of similarity measures for sequences, covering various kernel, distance and non-metric similarity functions. The basis for comparison is embedding of sequences using a formal language, such as a set of natural words, k-grams or all contiguous subsequences. As realizations of the framework we provide linear-time algorithms of different complexity and capabilities using sorted arrays, tries and suffix trees as underlying data structures.
   4260 
   4261 Experiments on data sets from bioinformatics, text processing and computer security illustrate the efficiency of the proposed algorithms---enabling peak performances of up to $10^6$ pairwise comparisons per second. The utility of distances and non-metric similarity measures for sequences as alternatives to string kernels is demonstrated in applications of text categorization, network intrusion detection and transcription site recognition in DNA},
   4262         issn = {1532-4435},
   4263         url = {http://portal.acm.org/citation.cfm?id=1390683$\#$},
   4264         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jmlr08.pdf},
   4265         author = {Rieck, Konrad and Laskov, Pavel}
   4266 }
   4267 @article {1358311,
   4268         title = {Linyphi: creating IPv6 mesh networks with SSR},
   4269         journal = {Concurr. Comput. : Pract. Exper},
   4270         volume = {20},
   4271         number = {6},
   4272         year = {2008},
   4273         pages = {675--691},
   4274         publisher = {John Wiley and Sons Ltd},
   4275         address = {Chichester, UK},
   4276         abstract = {Scalable source routing (SSR) is a self-organizing routing protocol which is especially suited for networks that do not have a well-crafted structure, e.g. ad hoc and mesh networks. SSR works on a flat identifier space. As a consequence, it can easily support host mobility without requiring any location directory or other centralized service. SSR is based on a virtual ring structure, which is used in a chord-like manner to obtain source routes to previously unknown destinations. It has been shown that SSR requires very little per node state and produces very little control messages. In particular, SSR has been found to outperform other ad hoc routing protocols such as ad hoc on-demand distance vector routing, optimized link-state routing, or beacon vector routing. In this paper we present Linyphi, an implementation of SSR for wireless access routers. Linyphi combines IPv6 and SSR so that unmodified IPv6 hosts have transparent connectivity to both the Linyphi mesh network and the IPv4-v6 Internet. We give a basic outline of the implementation and demonstrate its suitability in real-world mesh network scenarios. Furthermore, we illustrate the use of Linyphi for distributed applications such as the Linyphone peer-to-peer VoIP application. Copyright {\textcopyright} 2008 John Wiley \& Sons, Ltd},
   4277         www_section = {scalable source routing},
   4278         issn = {1532-0626},
   4279         doi = {10.1002/cpe.v20:6},
   4280         url = {http://portal.acm.org/citation.cfm?id=1358302.1358311$\#$},
   4281         author = {Di, Pengfei and Johannes Eickhold and Thomas Fuhrmann}
   4282 }
   4283 @conference {murdoch-pet2008,
   4284         title = {Metrics for Security and Performance in Low-Latency Anonymity Networks},
   4285         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4286         year = {2008},
   4287         month = {July},
   4288         pages = {115--132},
   4289         publisher = {Springer},
   4290         organization = {Springer},
   4291         address = {Leuven, Belgium},
   4292         www_section = {anonymity, Tor},
   4293         isbn = {978-3-540-70629-8},
   4294         doi = {10.1007/978-3-540-70630-4_8},
   4295         url = {http://portal.acm.org/citation.cfm?id=1428259.1428267},
   4296         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/murdoch-pet2008.pdf},
   4297         author = {Steven J. Murdoch and Robert N. M. Watson},
   4298         editor = {Borisov, Nikita and Ian Goldberg}
   4299 }
   4300 @conference {pizzonia2008netkit,
   4301         title = {Netkit: easy emulation of complex networks on inexpensive hardware},
   4302         booktitle = {Proceedings of the 4th International Conference on Testbeds and research infrastructures for the development of networks \& communities},
   4303         series = {TridentCom '08},
   4304         year = {2008},
   4305         pages = {7:1--7:10},
   4306         publisher = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
   4307         organization = {ICST (Institute for Computer Sciences, Social-Informatics and Telecommunications Engineering)},
   4308         address = {ICST, Brussels, Belgium, Belgium},
   4309         www_section = {network emulation, routing, user-mode Linux, virtual laboratories},
   4310         isbn = {978-963-9799-24-0},
   4311         url = {http://dl.acm.org/citation.cfm?id=1390576.1390585},
   4312         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/a7-pizzonia.pdf},
   4313         author = {Pizzonia, Maurizio and Rimondini, Massimo}
   4314 }
   4315 @article {1341892,
   4316         title = {ODSBR: An on-demand secure Byzantine resilient routing protocol for wireless ad hoc networks},
   4317         journal = {ACM Trans. Inf. Syst. Secur},
   4318         volume = {10},
   4319         number = {4},
   4320         year = {2008},
   4321         pages = {1--35},
   4322         publisher = {ACM},
   4323         address = {New York, NY, USA},
   4324         abstract = {Ad hoc networks offer increased coverage by using multihop communication. This architecture makes services more vulnerable to internal attacks coming from compromised nodes that behave arbitrarily to disrupt the network, also referred to as Byzantine attacks. In this work, we examine the impact of several Byzantine attacks performed by individual or colluding attackers. We propose ODSBR, the first on-demand routing protocol for ad hoc wireless networks that provides resilience to Byzantine attacks caused by individual or colluding nodes. The protocol uses an adaptive probing technique that detects a malicious link after log n faults have occurred, where n is the length of the path. Problematic links are avoided by using a route discovery mechanism that relies on a new metric that captures adversarial behavior. Our protocol never partitions the network and bounds the amount of damage caused by attackers. We demonstrate through simulations ODSBR's effectiveness in mitigating Byzantine attacks. Our analysis of the impact of these attacks versus the adversary's effort gives insights into their relative strengths, their interaction, and their importance when designing multihop wireless routing protocols},
   4325         www_section = {ad-hoc networks, byzantine fault tolerance, on-demand routing, security model},
   4326         issn = {1094-9224},
   4327         doi = {10.1145/1284680.1341892},
   4328         url = {http://portal.acm.org/citation.cfm?id=1284680.1341892$\#$},
   4329         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ODSBR-TISSEC.pdf},
   4330         author = {Awerbuch, Baruch and Curtmola, Reza and Holmer, David and Nita-Rotaru, Cristina and Rubens, Herbert}
   4331 }
   4332 @conference {Hartline:2008:OMD:1374376.1374390,
   4333         title = {Optimal mechanism design and money burning},
   4334         booktitle = {STOC'08. Proceedings of the 40th annual ACM Symposium on Theory of Computing},
   4335         series = {STOC '08},
   4336         year = {2008},
   4337         month = may,
   4338         pages = {75--84},
   4339         publisher = {ACM},
   4340         organization = {ACM},
   4341         address = {Victoria, British Columbia, Canada},
   4342         abstract = {Mechanism design is now a standard tool in computer science for aligning the incentives of self-interested agents with the objectives of a system designer. There is, however, a fundamental disconnect between the traditional application domains of mechanism design (such as auctions) and those arising in computer science (such as networks): while monetary "transfers" (i.e., payments) are essential for most of the known positive results in mechanism design, they are undesirable or even technologically infeasible in many computer systems. Classical impossibility results imply that the reach of mechanisms without transfers is severely limited. Computer systems typically do have the ability to reduce service quality--routing systems can drop or delay traffic, scheduling protocols can delay the release of jobs, and computational payment schemes can require computational payments from users (e.g., in spam-fighting systems). Service degradation is tantamount to requiring that users "burn money", and such "payments" can be used to influence the preferences of the agents at a cost of degrading the social surplus. We develop a framework for the design and analysis of "money-burning mechanisms" to maximize the residual surplus-the total value of the chosen outcome minus the payments required. Our primary contributions are the following. * We define a general template for prior-free optimal mechanism design that explicitly connects Bayesian optimal mechanism design, the dominant paradigm in economics, with worst-case analysis. In particular, we establish a general and principled way to identify appropriate performance benchmarks in prior-free mechanism design. * For general single-parameter agent settings, we characterize the Bayesian optimal money-burning mechanism. * For multi-unit auctions, we design a near-optimal prior-free money-burning mechanism: for every valuation profile, its expected residual surplus is within a constant factor of our benchmark, the residual surplus of the best Bayesian optimal mechanism for this profile. * For multi-unit auctions, we quantify the benefit of general transfers over money-burning: optimal money-burning mechanisms always obtain a logarithmic fraction of the full social surplus, and this bound is tight},
   4343         www_section = {mechanism design, money burning, optimal mechanism design},
   4344         isbn = {978-1-60558-047-0},
   4345         doi = {http://doi.acm.org/10.1145/1374376.1374390},
   4346         url = {http://doi.acm.org/10.1145/1374376.1374390},
   4347         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2708\%20-\%20Optimal\%20mechanism\%20design\%20and\%20money\%20burning.pdf},
   4348         author = {Jason D. Hartline and Roughgarden, Tim}
   4349 }
   4350 @article {Xie:2008:PPP:1402946.1402999,
   4351         title = {P4P: Provider Portal for Applications},
   4352         journal = {SIGCOMM Computer Communication Review},
   4353         volume = {38},
   4354         year = {2008},
   4355         month = aug,
   4356         pages = {351--362},
   4357         publisher = {ACM},
   4358         address = {New York, NY, USA},
   4359         abstract = {As peer-to-peer (P2P) emerges as a major paradigm for scalable network application design, it also exposes significant new challenges in achieving efficient and fair utilization of Internet network resources. Being largely network-oblivious, many P2P applications may lead to inefficient network resource usage and/or low application performance. In this paper, we propose a simple architecture called P4P to allow for more effective cooperative traffic control between applications and network providers. We conducted extensive simulations and real-life experiments on the Internet to demonstrate the feasibility and effectiveness of P4P. Our experiments demonstrated that P4P either improves or maintains the same level of application performance of native P2P applications, while, at the same time, it substantially reduces network provider cost compared with either native or latency-based localized P2P applications},
   4360         www_section = {network application, network architecture, P2P},
   4361         issn = {0146-4833},
   4362         doi = {http://doi.acm.org/10.1145/1402946.1402999},
   4363         url = {http://doi.acm.org/10.1145/1402946.1402999},
   4364         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20P4P\%3A\%20Provider\%20Portal\%20for\%20Applications.pdf},
   4365         author = {Xie, Haiyong and Yang, Y. Richard and Krishnamurthy, Arvind and Liu, Yanbin Grace and Silberschatz, Abraham}
   4366 }
   4367 @conference {raykova-pet2008,
   4368         title = {PAR: Payment for Anonymous Routing},
   4369         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4370         year = {2008},
   4371         month = {July},
   4372         pages = {219--236},
   4373         publisher = {Springer},
   4374         organization = {Springer},
   4375         address = {Leuven, Belgium},
   4376         abstract = {Despite the growth of the Internet and the increasing concern for privacy of online communications, current deployments of anonymization networks depend on a very small set of nodes that volunteer their bandwidth. We believe that the main reason is not disbelief in their ability to protect anonymity, but rather the practical limitations in bandwidth and latency that stem from limited participation. This limited participation, in turn, is due to a lack of incentives to participate. We propose providing economic incentives, which historically have worked very well.
   4377 In this paper, we demonstrate a payment scheme that can be used to compensate nodes which provide anonymity in Tor, an existing onion routing, anonymizing network. We show that current anonymous payment schemes are not suitable and introduce a hybrid payment system based on a combination of the Peppercoin Micropayment system and a new type of {\textquotedblleft}one use{\textquotedblright} electronic cash. Our system claims to maintain users' anonymity, although payment techniques mentioned previously -- when adopted individually -- provably fail},
   4378         www_section = {anonymity, onion routing, Tor},
   4379         doi = {10.1007/978-3-540-70630-4},
   4380         url = {http://www.springerlink.com/content/r1h1046823587382/},
   4381         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raykova-pet2008.pdf},
   4382         author = {Elli Androulaki and Mariana Raykova and Shreyas Srivatsan and Angelos Stavrou and Steven M. Bellovin},
   4383         editor = {Borisov, Nikita and Ian Goldberg}
   4384 }
   4385 @conference {ccs2008:tsang,
   4386         title = {PEREA: Towards Practical TTP-Free Revocation in Anonymous Authentication},
   4387         booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications Security (CCS 2008)},
   4388         year = {2008},
   4389         month = {October},
   4390         pages = {333--345},
   4391         publisher = {ACM Press},
   4392         organization = {ACM Press},
   4393         address = {Alexandria, Virginia, USA},
   4394         www_section = {non-membership proofs, subjective blacklisting},
   4395         isbn = {978-1-59593-810-7},
   4396         doi = {10.1145/1455770.1455813},
   4397         url = {http://portal.acm.org/citation.cfm?id=1455813},
   4398         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/perea-ccs08.pdf},
   4399         author = {Patrick P. Tsang and Man Ho Au and Apu Kapadia and Sean Smith},
   4400         editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang}
   4401 }
   4402 @conference {troncoso-pet2008,
   4403         title = {Perfect Matching Statistical Disclosure Attacks},
   4404         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4405         year = {2008},
   4406         month = {July},
   4407         pages = {2--23},
   4408         publisher = {Springer},
   4409         organization = {Springer},
   4410         address = {Leuven, Belgium},
   4411         abstract = {Traffic analysis is the best known approach to uncover relationships amongst users of anonymous communication systems, such as mix networks. Surprisingly, all previously published techniques require very specific user behavior to break the anonymity provided by mixes. At the same time, it is also well known that none of the considered user models reflects realistic behavior which casts some doubt on previous work with respect to real-life scenarios. We first present a user behavior model that, to the best of our knowledge, is the least restrictive scheme considered so far. Second, we develop the Perfect Matching Disclosure Attack, an efficient attack based on graph theory that operates without any assumption on user behavior. The attack is highly effective when de-anonymizing mixing rounds because it considers all users in a round at once, rather than single users iteratively. Furthermore, the extracted sender-receiver relationships can be used to enhance user profile estimations. We extensively study the effectiveness and efficiency of our attack and previous work when de-anonymizing users communicating through a threshold mix. Empirical results show the advantage of our proposal. We also show how the attack can be refined and adapted to different scenarios including pool mixes, and how precision can be traded in for speed, which might be desirable in certain cases },
   4412         www_section = {mix, traffic analysis},
   4413         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.147.4953},
   4414         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/troncoso-pet2008.pdf},
   4415         author = {Carmela Troncoso and Benedikt Gierlichs and Bart Preneel and Ingrid Verbauwhede},
   4416         editor = {Borisov, Nikita and Ian Goldberg}
   4417 }
   4418 @conference {loesing2008performance,
   4419         title = {Performance Measurements and Statistics of Tor Hidden Services},
   4420         booktitle = {Proceedings of the 2008 International Symposium on Applications and the Internet (SAINT)},
   4421         year = {2008},
   4422         month = {July},
   4423         publisher = {IEEE CS Press},
   4424         organization = {IEEE CS Press},
   4425         address = {Turku, Finland},
   4426         abstract = {Tor (The Onion Routing) provides a secure mechanism for offering TCP-based services while concealing the hidden server's IP address. In general the acceptance of services strongly relies on their QoS properties. For potential Tor users, provided the anonymity is secured, probably the most important QoS parameter is the time until they finally get a response from such a hidden service. Internally, overall response times are constituted by several steps invisible to the user. We provide comprehensive measurements of all relevant latencies and a detailed statistical analysis with special focus on the overall response times. Thereby, we gain valuable insights that enable us to give certain statistical assertions and to suggest improvements in the hidden service protocol and its implementation},
   4427         www_section = {anonymity, performance, privacy, statistical analysis},
   4428         isbn = {978-0-7695-3297-4},
   4429         doi = {10.1109/SAINT.2008.69},
   4430         url = {http://portal.acm.org/citation.cfm?id=1441426.1441996},
   4431         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/loesing2008performance.pdf},
   4432         author = {Karsten Loesing and Werner Sandmann and Christian Wilms and Guido Wirtz}
   4433 }
   4434 @conference {Shafaat:2008:PAN:1485753.1485763,
   4435         title = {A Practical Approach to Network Size Estimation for Structured Overlays},
   4436         booktitle = {IWSOS'08--Proceedings of the 3rd International Workshop on Self-Organizing Systems},
   4437         series = {Lecture Notes in Computer Science},
   4438         volume = {5343},
   4439         year = {2008},
   4440         month = dec,
   4441         pages = {71--83},
   4442         publisher = {Springer-Verlag},
   4443         organization = {Springer-Verlag},
   4444         address = {Vienna, Austria},
   4445         abstract = {Structured overlay networks have recently received much attention due to their self-* properties under dynamic and decentralized settings. The number of nodes in an overlay fluctuates all the time due to churn. Since knowledge of the size of the overlay is a core requirement for many systems, estimating the size in a decentralized manner is a challenge taken up by recent research activities. Gossip-based Aggregation has been shown to give accurate estimates for the network size, but previous work done is highly sensitive to node failures. In this paper, we present a gossip-based aggregation-style network size estimation algorithm. We discuss shortcomings of existing aggregation-based size estimation algorithms, and give a solution that is highly robust to node failures and is adaptive to network delays. We examine our solution in various scenarios to demonstrate its effectiveness},
   4446         www_section = {network size estimation, structured overlays},
   4447         isbn = {978-3-540-92156-1},
   4448         doi = {http://dx.doi.org/10.1007/978-3-540-92157-8_7},
   4449         url = {http://dx.doi.org/10.1007/978-3-540-92157-8_7},
   4450         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IWSOS\%2708\%20-\%20Network\%20Size\%20Estimation\%20for\%20Structured\%20Overlays.pdf},
   4451         author = {Shafaat, Tallat M. and Ali Ghodsi and Seif Haridi}
   4452 }
   4453 @techreport {2008_8,
   4454         title = {Privacy guarantees through distributed constraint satisfaction},
   4455         number = {12},
   4456         year = {2008},
   4457         month = apr,
   4458         institution = {Swiss Federal Institute of Technology (EPFL)},
   4459         type = {Tech report},
   4460         address = {Lausanne, Switzerland},
   4461         abstract = {In Distributed Constraint Satisfaction Problems, agents often desire to find a solution while revealing as little as possible about their variables and constraints. So far, most algorithms for DisCSP do not guarantee privacy of this information. This paper describes some simple obfuscation techniques that can be used with DisCSP algorithms such as DPOP, and provide sensible privacy guarantees based on the distributed solving process without sacrificing its efficiency},
   4462         www_section = {algorithms, DisCSP algorithm, distributed constraint satisfaction, optimization, privacy, SMC},
   4464         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Privacy\%20guarantees\%20through\%20DCS.pdf},
   4465         author = {Boi Faltings and Thomas Leaute and Adrian Petcu}
   4466 }
   4467 @book {springerlink:10.1007/978-0-387-70992-5,
   4468         title = {Privacy-Preserving Data Mining: Models and Algorithms},
   4469         series = {Advances in Database Systems},
   4470         volume = {34},
   4471         year = {2008},
   4472         publisher = {Springer US},
   4473         organization = {Springer US},
   4474         isbn = {978-0-387-70992-5},
   4475         author = {Aggarwal, Charu C. and Yu, Philip S.}
   4476 }
   4477 @article {2008_9,
   4478         title = {Progressive Strategies for Monte-Carlo Tree Search},
   4479         journal = {New Mathematics and Natural Computation},
   4480         volume = {4},
   4481         year = {2008},
   4482         pages = {343--357},
   4483         abstract = {Monte-Carlo Tree Search (MCTS) is a new best-first search guided by the results of Monte-Carlo simulations. In this article, we introduce two progressive strategies for MCTS, called progressive bias and progressive unpruning. They enable the use of relatively time-expensive heuristic knowledge without speed reduction. Progressive bias directs the search according to heuristic knowledge. Progressive unpruning first reduces the branching factor, and then increases it gradually again. Experiments assess that the two progressive strategies significantly improve the level of our Go program Mango. Moreover, we see that the combination of both strategies performs even better on larger board sizes},
   4484         www_section = {computer go, MCTS heuristic search, Monte-Carlo Tree Search},
   4485         doi = {10.1142/S1793005708001094},
   4486         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NMNC\%20-\%20Progressive\%20strategies\%20for\%20MCTS.pdf},
   4487         author = {Guillaume M. J-B. Chaslot and Mark H. M. Winands and H. Jaap van den Herik and Jos W. H. M. Uiterwijk and Bruno Bouzy}
   4488 }
   4489 @conference {di08iptps,
   4490         title = {Providing KBR Service for Multiple Applications},
   4491         booktitle = {The 7th International Workshop on Peer-to-Peer Systems (IPTPS '08)},
   4492         year = {2008},
   4493         type = {publication},
   4494         address = {St. Petersburg, FL, USA},
   4495         abstract = {Key based routing (KBR) enables peer-to-peer applications to create and use distributed services. KBR is more flexible than distributed hash tables (DHT). However, the broader the application area, the more important become performance issues for a KBR service. In this paper, we present a novel approach to provide a generic KBR service. Its key idea is to use a predictable address assignment scheme. This scheme allows peers to calculate the overlay address of the node that is responsible for a given key and application ID. A public DHT service such as OpenDHT can then resolve this overlay address to the transport address of the respective peer. We compare our solution to alternative proposals such as ReDiR and Diminished Chord. We conclude that our solution has a better worst case complexity for some important KBR operations and the required state. In particular, unlike ReDiR, our solution can guarantee a low latency for KBR route operations },
   4496         www_section = {distributed hash table, P2P},
   4497         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   4498         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di08iptps.pdf},
   4499         author = {Di, Pengfei and Kendy Kutzner and Thomas Fuhrmann}
   4500 }
   4501 @conference {quant-adhoc,
   4502         title = {Quantification of Anonymity for Mobile Ad Hoc Networks},
   4503         booktitle = {Proceedings of the 4th International Workshop on Security and Trust Management (STM 08)},
   4504         year = {2008},
   4505         month = {June},
   4506         pages = {25--36},
   4507         publisher = {Elsevier Science Publishers B. V., Amsterdam, The Netherlands},
   4508         organization = {Elsevier Science Publishers B. V., Amsterdam, The Netherlands},
   4509         address = {Trondheim, Norway},
   4510         abstract = {We propose a probabilistic system model for anonymous ad hoc routing protocols that takes into account the a priori knowledge of the adversary, and illustrate how the information theoretical entropy can be used for quantification of the anonymity offered by a routing protocol as the adversary captures an increasing number of nodes in the network. The proposed measurement schema is applied to ANODR and ARM routing protocols},
   4511         www_section = {ad-hoc networks, anonymity, routing, security model},
   4512         doi = {10.1016/j.entcs.2009.07.041},
   4513         url = {http://portal.acm.org/citation.cfm?id=1619033},
   4514         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/quant-adhoc.pdf},
   4515         author = {Marie Elisabeth Gaup Moe}
   4516 }
   4517 @conference {Goldberg:2008:RTA:1402958.1402989,
   4518         title = {Rationality and Traffic Attraction: Incentives for Honest Path Announcements in BGP},
   4519         booktitle = {SIGCOMM'08. Proceedings of the ACM SIGCOMM 2008 Conference on Data Communication},
   4520         series = {SIGCOMM Computer Communication Review},
   4521         year = {2008},
   4522         month = oct,
   4523         pages = {267--278},
   4524         publisher = {ACM},
   4525         organization = {ACM},
   4526         address = {Seattle, WA},
   4527         abstract = {We study situations in which autonomous systems (ASes) may have incentives to send BGP announcements differing from the AS-level paths that packets traverse in the data plane. Prior work on this issue assumed that ASes seek only to obtain the best possible outgoing path for their traffic. In reality, other factors can influence a rational AS's behavior. Here we consider a more natural model, in which an AS is also interested in attracting incoming traffic (e.g., because other ASes pay it to carry their traffic). We ask what combinations of BGP enhancements and restrictions on routing policies can ensure that ASes have no incentive to lie about their data-plane paths. We find that protocols like S-BGP alone are insufficient, but that S-BGP does suffice if coupled with additional (quite unrealistic) restrictions on routing policies. Our game-theoretic analysis illustrates the high cost of ensuring that the ASes honestly announce data-plane paths in their BGP path announcements},
   4528         www_section = {as, autonomous system, bgp, incentives},
   4529         isbn = {978-1-60558-175-0},
   4530         doi = {http://doi.acm.org/10.1145/1402958.1402989},
   4531         url = {http://doi.acm.org/10.1145/1402958.1402989},
   4532         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2708\%20-\%20Rationality\%20and\%20traffic\%20attraction.pdf},
   4533         author = {Goldberg, Sharon and Halevi, Shai and Jaggard, Aaron D. and Ramachandran, Vijay and Wright, Rebecca N.}
   4534 }
   4535 @conference {androulaki-pet2008,
   4536         title = {Reputation Systems for Anonymous Networks},
   4537         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4538         year = {2008},
   4539         month = {July},
   4540         pages = {202--218},
   4541         publisher = {Springer},
   4542         organization = {Springer},
   4543         address = {Leuven, Belgium},
   4544         abstract = {We present a reputation scheme for a pseudonymous peer-to-peer (P2P) system in an anonymous network. Misbehavior is one of the biggest problems in pseudonymous P2P systems, where there is little incentive for proper behavior. In our scheme, using ecash for reputation points, the reputation of each user is closely related to his real identity rather than to his current pseudonym. Thus, our scheme allows an honest user to switch to a new pseudonym keeping his good reputation, while hindering a malicious user from erasing his trail of evil deeds with a new pseudonym},
   4545         www_section = {anonymity, P2P, pseudonym},
   4546         isbn = {978-3-540-70629-8},
   4547         doi = {10.1007/978-3-540-70630-4_13},
   4548         url = {http://portal.acm.org/citation.cfm?id=1428259.1428272},
   4549         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/getTechreport.pdf},
   4550         author = {Elli Androulaki and Seung Geol Choi and Steven M. Bellovin and Tal Malkin},
   4551         editor = {Borisov, Nikita and Ian Goldberg}
   4552 }
   4553 @conference {2008_10,
   4554         title = {Robust De-anonymization of Large Sparse Datasets},
   4555         booktitle = {Proceedings of the 2008 IEEE Symposium on Security and Privacy},
   4556         year = {2008},
   4557         publisher = {IEEE Computer Society},
   4558         organization = {IEEE Computer Society},
   4559         address = {Washington, DC, USA},
   4560         abstract = {We present a new class of statistical deanonymization attacks against high-dimensional micro-data, such as individual preferences, recommendations, transaction records and so on. Our techniques are robust to perturbation in the data and tolerate some mistakes in the adversary's background knowledge. We apply our de-anonymization methodology to the Netflix Prize dataset, which contains anonymous movie ratings of 500,000 subscribers of Netflix, the world's largest online movie rental service. We demonstrate that an adversary who knows only a little bit about an individual subscriber can easily identify this subscriber's record in the dataset. Using the Internet Movie Database as the source of background knowledge, we successfully identified the Netflix records of known users, uncovering their apparent political preferences and other potentially sensitive information},
   4561         www_section = {anonymity, attack, privacy},
   4562         isbn = {978-0-7695-3168-7},
   4563         doi = {10.1109/SP.2008.33},
   4564         url = {http://dx.doi.org/10.1109/SP.2008.33},
   4565         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Deanonymization2008narayanan.pdf},
   4566         author = {Narayanan, Arvind and Shmatikov, Vitaly}
   4567 }
   4568 @conference {mccoy-pet2008,
   4569         title = {Shining Light in Dark Places: Understanding the Tor Network},
   4570         booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
   4571         year = {2008},
   4572         month = {July},
   4573         pages = {63--76},
   4574         publisher = {Springer},
   4575         organization = {Springer},
   4576         address = {Leuven, Belgium},
   4577         abstract = {To date, there has yet to be a study that characterizes the usage of a real deployed anonymity service. We present observations and analysis obtained by participating in the Tor network. Our primary goals are to better understand Tor as it is deployed and through this understanding, propose improvements. In particular, we are interested in answering the following questions: (1) How is Tor being used? (2) How is Tor being mis-used? (3) Who is using Tor?
   4578 
   4579 To sample the results, we show that web traffic makes up the majority of the connections and bandwidth, but non-interactive protocols consume a disproportionately large amount of bandwidth when compared to interactive protocols. We provide a survey of how Tor is being misused, both by clients and by Tor router operators. In particular, we develop a method for detecting exit router logging (in certain cases). Finally, we present evidence that Tor is used throughout the world, but router participation is limited to only a few countries},
   4580         www_section = {anonymity, Tor},
   4581         isbn = {978-3-540-70629-8},
   4582         doi = {10.1007/978-3-540-70630-4_5},
   4583         url = {http://portal.acm.org/citation.cfm?id=1428264},
   4584         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mccoy-pet2008.pdf},
   4585         author = {Damon McCoy and Kevin Bauer and Dirk Grunwald and Tadayoshi Kohno and Douglas Sicker},
   4586         editor = {Borisov, Nikita and Ian Goldberg}
   4587 }
   4588 @article {1461118,
   4589         title = {Shortest-path routing in randomized DHT-based Peer-to-Peer systems},
   4590         journal = {Comput. Netw},
   4591         volume = {52},
   4592         number = {18},
   4593         year = {2008},
   4594         pages = {3307--3317},
   4595         publisher = {Elsevier North-Holland, Inc},
   4596         address = {New York, NY, USA},
   4597         abstract = {Randomized DHT-based Peer-to-Peer (P2P) systems grant nodes certain flexibility in selecting their overlay neighbors, leading to irregular overlay structures but to better overall performance in terms of path latency, static resilience and local convergence. However, routing in the presence of overlay irregularity is challenging. In this paper, we propose a novel routing protocol, RASTER, that approximates shortest overlay routes between nodes in randomized DHTs. Unlike previously proposed routing protocols, RASTER encodes and aggregates routing information. Its simple bitmap-encoding scheme together with the proposed RASTER routing algorithm enable a performance edge over current overlay routing protocols. RASTER provides a forwarding overhead of merely a small constant number of bitwise operations, a routing performance close to optimal, and a better resilience to churn. RASTER also provides nodes with the flexibility to adjust the size of the maintained routing information based on their storage/processing capabilities. The cost of storing and exchanging encoded routing information is manageable and grows logarithmically with the number of nodes in the system},
   4598         www_section = {distributed hash table, P2P, routing},
   4599         issn = {1389-1286},
   4600         doi = {10.1016/j.comnet.2008.07.014},
   4601         url = {http://portal.acm.org/citation.cfm?id=1461118$\#$},
   4602         author = {Wang, Chih-Chiang and Harfoush, Khaled}
   4603 }
   4604 @booklet {2008_11,
   4605         title = {The Spontaneous Virtual Networks Architecture for Supporting Future Internet Services and Applications},
   4606         year = {2008},
   4607         note = {Talk at the Future Internet workshop (Fachgespr{\"a}ch) of the GI/ITG special interest group {\textquoteleft}{\textquoteleft}Kommunikation und Verteilte Systeme''},
   4608         publisher = {NEC, Heidelberg},
   4609         author = {Roland Bless and Oliver Waldhorst and Mayer, Christoph P.}
   4610 }
   4611 @conference {conf/infocom/WangLX08,
   4612         title = {Stable Peers: Existence, Importance, and Application in Peer-to-Peer Live Video Streaming},
   4613         booktitle = {INFOCOM'08. Proceedings of the 27th IEEE International Conference on Computer Communications},
   4614         year = {2008},
   4615         month = apr,
   4616         pages = {1364--1372},
   4617         publisher = {IEEE Computer Society},
   4618         organization = {IEEE Computer Society},
   4619         address = {Phoenix, AZ, USA},
   4620         abstract = {This paper presents a systematic in-depth study on the existence, importance, and application of stable nodes in peer-to-peer live video streaming. Using traces from a real large-scale system as well as analytical models, we show that, while the number of stable nodes is small throughout a whole session, their longer lifespans make them constitute a significant portion in a per-snapshot view of a peer-to-peer overlay. As a result, they have substantially affected the performance of the overall system. Inspired by this, we propose a tiered overlay design, with stable nodes being organized into a tier-1 backbone for serving tier-2 nodes. It offers a highly cost-effective and deployable alternative to proxy-assisted designs. We develop a comprehensive set of algorithms for stable node identification and organization. Specifically, we present a novel structure, Labeled Tree, for the tier-1 overlay, which, leveraging stable peers, simultaneously achieves low overhead and high transmission reliability. Our tiered framework flexibly accommodates diverse existing overlay structures in the second tier. Our extensive simulation results demonstrated that the customized optimization using selected stable nodes boosts the streaming quality and also effectively reduces the control overhead. This is further validated through prototype experiments over the PlanetLab network},
   4621         www_section = {peer-to-peer live video streaming, stable peer},
   4622         isbn = {978-1-4244-2025-4 },
   4623         doi = {http://dx.doi.org/10.1109/INFOCOM.2008.194},
   4624         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2708\%20-\%20Stable\%20peers.PDF},
   4625         author = {Wang, Feng and Liu, Jiangchuan and Xiong, Yongqiang}
   4626 }
   4627 @booklet {DD08Survey,
   4628         title = {A Survey of Anonymous Communication Channels},
   4629         number = {MSR-TR-2008-35},
   4630         year = {2008},
   4631         month = jan,
   4632         publisher = {Microsoft Research},
   4633         abstract = {We present an overview of the field of anonymous communications, from its establishment in 1981 by David Chaum to today. Key systems are presented, categorized according to their underlying principles: semi-trusted relays, mix systems, remailers, onion routing, and systems to provide robust mixing. We include extended discussions of the threat models and usage models that different schemes provide, and the trade-offs between the security properties offered and the communication characteristics different systems support},
   4634         www_section = {onion routing, robustness},
   4635         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.138.7951},
   4636         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DD08Survey.pdf},
   4637         author = {George Danezis and Claudia Diaz}
   4638 }
   4639 @conference {Infocom2008,
   4640         title = {Swarming on Optimized Graphs for n-way Broadcast},
   4641         booktitle = {Proceedings of IEEE INFOCOM 2008},
   4642         year = {2008},
   4643         month = apr,
   4644         address = {Phoenix, AZ},
   4645         abstract = {In an n-way broadcast application each one of n overlay nodes wants to push its own distinct large data file to all other n-1 destinations as well as download their respective data files. BitTorrent-like swarming protocols are ideal choices for handling such massive data volume transfers. The original BitTorrent targets one-to-many broadcasts of a single file to a very large number of receivers and thus, by necessity, employs an almost random overlay topology. n-way broadcast applications on the other hand, owing to their inherent n-squared nature, are realizable only in small to medium scale networks. In this paper, we show that we can leverage this scale constraint to construct optimized overlay topologies that take into consideration the end-to-end characteristics of the network and as a consequence deliver far superior performance compared to random and myopic (local) approaches. We present the Max-Min and Max-Sum peer-selection policies used by individual nodes to select their neighbors. The first one strives to maximize the available bandwidth to the slowest destination, while the second maximizes the aggregate output rate. We design a swarming protocol suitable for n-way broadcast and operate it on top of overlay graphs formed by nodes that employ Max-Min or Max-Sum policies. Using trace-driven simulation and measurements from a PlanetLab prototype implementation, we demonstrate that the performance of swarming on top of our constructed topologies is far superior to the performance of random and myopic overlays. Moreover, we show how to modify our swarming protocol to allow it to accommodate selfish nodes},
   4646         www_section = {EGOIST, game theory, routing},
   4647         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2008.pdf},
   4648         author = {Georgios Smaragdakis and Nikolaos Laoutaris and Pietro Michiardi and Azer Bestavros and Byers, John W. and Mema Roussopoulos}
   4649 }
   4650 @conference {1456474,
   4651         title = {Tahoe: the least-authority filesystem},
   4652         booktitle = {StorageSS '08: Proceedings of the 4th ACM international workshop on Storage security and survivability},
   4653         year = {2008},
   4654         pages = {21--26},
   4655         publisher = {ACM},
   4656         organization = {ACM},
   4657         address = {New York, NY, USA},
   4658         abstract = {Tahoe is a system for secure, distributed storage. It uses capabilities for access control, cryptography for confidentiality and integrity, and erasure coding for fault-tolerance. It has been deployed in a commercial backup service and is currently operational. The implementation is Open Source},
   4659         www_section = {capabilities, fault-tolerance, P2P},
   4660         isbn = {978-1-60558-299-3},
   4661         doi = {10.1145/1456469.1456474},
   4662         url = {http://portal.acm.org/citation.cfm?id=1456474$\#$},
   4663         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lafs.pdf},
   4664         author = {Wilcox-O'Hearn, Zooko and Warner, Brian}
   4665 }
   4666 @booklet {fuhrmann08comparable,
   4667         title = {Towards Comparable Network Simulations},
   4668         number = {2008-9},
   4669         year = {2008},
   4670         month = aug,
   4671         publisher = {Dept. of Computer Science, Universit{\"a}t Karlsruhe (TH)},
   4672         type = {Interner Bericht},
   4673         abstract = {Simulations have been a valuable and much used tool in networking research for decades. New protocols are evaluated by simulations. Often, competing designs are judged by their respective performance in simulations. Despite this great importance the state-of-the-art in network simulations is nevertheless still low. A recent survey showed that most publications in a top conference did not even give enough details to repeat the simulations. In this paper we go beyond repeatability and ask: Are different simulations comparable? We study various implementations of the IEEE 802.11 media access layer in ns-2 and OMNeT++ and report some dramatic differences. These findings indicate that two protocols cannot be compared meaningfully unless they are compared in the very same simulation environment. We claim that this problem limits the value of the respective publications because readers are forced to re-implement the work that is described in the paper rather than building on its results. Facing the additional problem that not all authors will agree on one simulator, we address ways of making different simulators comparable},
   4674         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   4675         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/towards_comparable_network_simulations.pdf},
   4676         author = {Di, Pengfei and Yaser Houri and Kendy Kutzner and Thomas Fuhrmann}
   4677 }
   4678 @conference {2008_12,
   4679         title = {Towards Empirical Aspects of Secure Scalar Product},
   4680         booktitle = {Information Security and Assurance, 2008. ISA 2008. International Conference on},
   4681         year = {2008},
   4682         month = {April},
   4683         abstract = {Privacy is ultimately important, and there is a fair amount of research about it. However, few empirical studies about the cost of privacy have been conducted. In the area of secure multiparty computation, the scalar product has long been reckoned as one of the most promising building blocks in place of the classic logic gates. The reason is not only that the scalar product is complete, i.e. as expressive as logic gates, but also that it is much more efficient than logic gates. As a result, we set out to study the computation and communication resources needed for some of the most well-known and frequently cited secure scalar-product protocols, including the composite-residuosity, the invertible-matrix, the polynomial-sharing, and the commodity-based approaches. Besides implementation remarks on these approaches, we analyze and compare their execution time, computation time, and random number consumption, which are the resources of greatest concern for secure protocols. Moreover, Fairplay, the benchmark approach implementing Yao's famous circuit evaluation protocol, is included in our experiments in order to demonstrate the potential of the scalar product to replace logic gates},
   4684         www_section = {circuit evaluation protocol, Circuits, commodity-based, composite residuosity, composite-residuosity, Computational efficiency, Costs, data privacy, empirical survey, Information science, information security, invertible-matrix, logic gates, polynomial-sharing, Polynomials, privacy, Proposals, protocols, scalar-product, secure multiparty computation, secure protocols, Secure scalar product, secure scalar-product protocols},
   4685         doi = {10.1109/ISA.2008.78},
   4686         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EmpiricalAspects2009Wang.pdf},
   4687         author = {I-Cheng Wang and Chih-Hao Shen and Tsan-sheng Hsu and Churn-Chung Liao and Da-Wei Wang and Zhan, J.}
   4688 }
   4689 @article {Pouwelse:2008:TSP:1331115.1331119,
   4690         title = {TRIBLER: a Social-based Peer-to-Peer System},
   4691         journal = {Concurrency and Computation: Practice \& Experience},
   4692         volume = {20},
   4693         year = {2008},
   4694         month = feb,
   4695         pages = {127--138},
   4696         publisher = {John Wiley and Sons Ltd},
   4697         address = {Chichester, UK},
   4698         abstract = {Most current peer-to-peer (P2P) file-sharing systems treat their users as anonymous, unrelated entities, and completely disregard any social relationships between them. However, social phenomena such as friendship and the existence of communities of users with similar tastes or interests may well be exploited in such systems in order to increase their usability and performance. In this paper we present a novel social-based P2P file-sharing paradigm that exploits social phenomena by maintaining social networks and using these in content discovery, content recommendation, and downloading. Based on this paradigm's main concepts such as taste buddies and friends, we have designed and implemented the TRIBLER P2P file-sharing system as a set of extensions to BitTorrent. We present and discuss the design of TRIBLER, and we show evidence that TRIBLER enables fast content discovery and recommendation at a low additional overhead, and a significant improvement in download performance. Copyright {\textcopyright} 2007 John Wiley \& Sons, Ltd},
   4699         www_section = {peer-to-peer networking, social-based, taste buddies},
   4700         issn = {1532-0626},
   4701         doi = {http://dx.doi.org/10.1002/cpe.v20:2},
   4702         url = {http://dl.acm.org/citation.cfm?id=1331115.1331119},
   4703         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Concurrency\%20and\%20Computation\%20-\%20TRIBLER.pdf},
   4704         author = {Johan Pouwelse and Garbacki, Pawel and Jun Wang and Arno Bakker and Jie Yang and Alexandru Iosup and Epema, Dick H. J. and Marcel J. T. Reinders and van Steen, Maarten and Henk J. Sips}
   4705 }
   4706 @conference {1424615,
   4707         title = {Trust-Rated Authentication for Domain-Structured Distributed Systems},
   4708         booktitle = {EuroPKI '08: Proceedings of the 5th European PKI workshop on Public Key Infrastructure},
   4709         year = {2008},
   4710         pages = {74--88},
   4711         publisher = {Springer-Verlag},
   4712         organization = {Springer-Verlag},
   4713         address = {Berlin, Heidelberg},
   4714         abstract = {We present an authentication scheme and new protocol for domain-based scenarios with inter-domain authentication. Our protocol is primarily intended for domain-structured Peer-to-Peer systems but is applicable for any domain scenario where clients from different domains wish to authenticate to each other. To this end, we make use of Trusted Third Parties in the form of Domain Authentication Servers in each domain. These act on behalf of their clients, resulting in a four-party protocol. If there is a secure channel between the Domain Authentication Servers, our protocol can provide secure authentication. To address the case where domains do not have a secure channel between them, we extend our scheme with the concept of trust-rating. Domain Authentication Servers signal security-relevant information to their clients (pre-existing secure channel or not, trust, ...). The clients evaluate this information to decide if it fits the security requirements of their application},
   4715         www_section = {authentication, distributed systems, P2P, PKI, trust},
   4716         isbn = {978-3-540-69484-7},
   4717         doi = {10.1007/978-3-540-69485-4},
   4718         url = {http://www.springerlink.com/content/k6786282r5378k42/},
   4719         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AuthenticationEuroPKI2008.pdf},
   4720         author = {Ralph Holz and Heiko Niedermayer and Hauck, Peter and Carle, Georg}
   4721 }
   4722 @conference {snader08,
   4723         title = {A Tune-up for Tor: Improving Security and Performance in the Tor Network},
   4724         booktitle = {Proceedings of the Network and Distributed System Security Symposium--NDSS '08},
   4725         year = {2008},
   4726         month = feb,
   4727         publisher = {Internet Society},
   4728         organization = {Internet Society},
   4729         abstract = {The Tor anonymous communication network uses self-reported bandwidth values to select routers for building tunnels. Since tunnels are allocated in proportion to this bandwidth, this allows a malicious router operator to attract tunnels for compromise. Since the metric used is insensitive to relative load, it does not adequately respond to changing conditions and hence produces unreliable performance, driving many users away. We propose an opportunistic bandwidth measurement algorithm to replace self-reported values and address both of these problems. We also propose a mechanism to let users tune Tor performance to achieve higher performance or higher anonymity. Our mechanism effectively blends the traffic from users of different preferences, making partitioning attacks difficult. We implemented the opportunistic measurement and tunable performance extensions and examined their performance both analytically and in the real Tor network. Our results show that users can get dramatic increases in either performance or anonymity with little to no sacrifice in the other metric, or a more modest improvement in both. Our mechanisms are also invulnerable to the previously published low-resource attacks on Tor},
   4730         www_section = {anonymity, Tor},
   4732         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.7368},
   4733         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/snader08.pdf},
   4734         author = {Robin Snader and Borisov, Nikita}
   4735 }
   4736 @conference {2008_13,
   4737         title = {The Underlay Abstraction in the Spontaneous Virtual Networks (SpoVNet) Architecture},
   4738         booktitle = {Proc. 4th EuroNGI Conf. on Next Generation Internet Networks (NGI 2008)},
   4739         year = {2008},
   4740         pages = {115--122},
   4741         address = {Krakow, Poland},
   4742         abstract = {Next generation networks will combine many heterogeneous access technologies to provide services to a large number of highly mobile users while meeting their demands for quality of service, robustness, and security. Obviously, this is not a trivial task and many protocols fulfilling some combination of these requirements have been proposed. However, none of the current proposals meets all requirements, and the deployment of new applications and services is hindered by a patchwork of protocols. This paper presents Spontaneous Virtual Networks (SpoVNet), an architecture that fosters the creation of new applications and services for next generation networks by providing an underlay abstraction layer. This layer applies an overlay-based approach to cope with mobility, multi-homing, and heterogeneity. For coping with network mobility, it uses a SpoVNet-specific addressing scheme, splitting node identifiers from network locators and providing persistent connections by transparently switching locators. To deal with multihoming, it transparently chooses the most appropriate pair of network locators for each connection. To cope with network and protocol heterogeneity, it uses dedicated overlay nodes, e.g., for relaying between IPv4 and IPv6 hosts},
   4743         www_section = {heterogeneity, robustness},
   4744         url = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=283},
   4745         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/underlayabs-ngi08-final.pdf},
   4746         author = {Roland Bless and H{\"u}bsch, Christian and Sebastian Mies and Oliver Waldhorst}
   4747 }
   4748 @article {2008_14,
   4749         title = {Unerkannt. Anonymisierende Peer-to-Peer-Netze im {\"U}berblick},
   4750         journal = {iX magazin f{\"u}r professionelle informationstechnik},
   4751         year = {2008},
   4752         type = {Survey},
   4753         url = {http://www.heise.de/kiosk/archiv/ix/2008/9/88_Anonyme-Peer-to-Peer-Netze-im-Ueberblick},
   4754         author = {Nils Durner and Nathan S Evans and Christian Grothoff}
   4755 }
   4756 @article {2008_15,
   4757         title = {What Can We Learn Privately?},
   4758         journal = {CoRR},
   4759         volume = {abs/0803.0924},
   4760         year = {2008},
   4761         abstract = {Learning problems form an important category of computational tasks that generalizes many of the computations researchers apply to large real-life data sets. We ask: what concept classes can be learned privately, namely, by an algorithm whose output does not depend too heavily on any one input or specific training example? More precisely, we investigate learning algorithms that satisfy differential privacy, a notion that provides strong confidentiality guarantees in contexts where aggregate information is released about a database containing sensitive information about individuals. We demonstrate that, ignoring computational constraints, it is possible to privately agnostically learn any concept class using a sample size approximately logarithmic in the cardinality of the concept class. Therefore, almost anything learnable is learnable privately: specifically, if a concept class is learnable by a (non-private) algorithm with polynomial sample complexity and output size, then it can be learned privately using a polynomial number of samples. We also present a computationally efficient private PAC learner for the class of parity functions. Local (or randomized response) algorithms are a practical class of private algorithms that have received extensive investigation. We provide a precise characterization of local private learning algorithms. We show that a concept class is learnable by a local algorithm if and only if it is learnable in the statistical query (SQ) model. Finally, we present a separation between the power of interactive and noninteractive local learning algorithms},
   4762         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WhatCanWeLearnPrivately2008Kasiviswanthan.pdf},
   4763         author = {Shiva Prasad Kasiviswanathan and Homin K. Lee and Kobbi Nissim and Sofya Raskhodnikova and Adam Smith}
   4764 }
   4765 @conference {Jian:2008:WSP:1409540.1409546,
   4766         title = {Why Share in Peer-to-Peer Networks?},
   4767         booktitle = {EC'08. Proceedings of the 10th International Conference on Electronic Commerce},
   4768         series = {ICEC '08},
   4769         year = {2008},
   4770         month = aug,
   4771         pages = {4:1--4:8},
   4772         publisher = {ACM},
   4773         organization = {ACM},
   4774         address = {Innsbruck, Austria},
   4775         abstract = {Prior theory and empirical work emphasize the enormous free-riding problem facing peer-to-peer (P2P) sharing networks. Nonetheless, many P2P networks thrive. We explore two possible explanations that do not rely on altruism or explicit mechanisms imposed on the network: direct and indirect private incentives for the provision of public goods. The direct incentive is a traffic redistribution effect that advantages the sharing peer. We find this incentive is likely insufficient to motivate equilibrium content sharing in large networks. We then approach P2P networks as a graph-theoretic problem and present sufficient conditions for sharing and free-riding to co-exist due to indirect incentives we call generalized reciprocity},
   4776         www_section = {file-sharing, networks, P2P, peer-to-peer networking},
   4777         isbn = {978-1-60558-075-3},
   4778         doi = {http://doi.acm.org/10.1145/1409540.1409546},
   4779         url = {http://doi.acm.org/10.1145/1409540.1409546},
   4780         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2708\%20-\%20Why\%20share\%20in\%20peer-to-peer\%20networks.pdf},
   4781         author = {Jian, Lian and MacKie-Mason, Jeffrey K.}
   4782 }
   4783 @conference {Garbacki:2007:ATP:1270401.1271766,
   4784         title = {An Amortized Tit-For-Tat Protocol for Exchanging Bandwidth instead of Content in P2P Networks},
   4785         booktitle = {SASO 2007. Proceedings of the First International Conference on Self-Adaptive and Self-Organizing Systems},
   4786         series = {SASO '07},
   4787         year = {2007},
   4788         month = jul,
   4789         pages = {119--128},
   4790         publisher = {IEEE Computer Society},
   4791         organization = {IEEE Computer Society},
   4792         address = {Boston, Massachusetts},
   4793         abstract = {Incentives for resource sharing are crucial for the proper operation of P2P networks. The principle of the incentive mechanisms in current content sharing P2P networks such as BitTorrent is to have peers exchange content of mutual interest. As a consequence, a peer can actively participate in the system only if it shares content that is of immediate interest to other peers. In this paper we propose to lift this restriction by using bandwidth rather than content as the resource upon which incentives are based. Bandwidth, in contrast to content, is independent of peer interests and so can be exchanged between any two peers. We present the design of a protocol called amortized tit-for-tat (ATFT) based on the bandwidth-exchange concept. This protocol defines mechanisms for bandwidth exchange corresponding to those in BitTorrent for content exchange, in particular for finding bandwidth borrowers that amortize the bandwidth borrowed in the past with their currently idle bandwidth. In addition to the formally proven incentives for bandwidth contributions, ATFT provides natural solutions to the problems of peer bootstrapping, seeding incentive, peer link asymmetry, and anonymity, which have previously been addressed with much more complex designs. Experiments with a real-world dataset confirm that ATFT is efficient in enforcing bandwidth contributions and results in download performance better than provided by incentive mechanisms based on content exchange},
   4794         www_section = {bandwidth exchange, p2p network, resource sharing, tit-for-tat},
   4795         isbn = {0-7695-2906-2},
   4796         doi = {http://dx.doi.org/10.1109/SASO.2007.9},
   4797         url = {http://dx.doi.org/10.1109/SASO.2007.9},
   4798         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SASO\%2707\%20-\%20Garbacki\%2C\%20Epema\%20\%26\%20van\%20Steen.pdf},
   4799         author = {Garbacki, Pawel and Epema, Dick H. J. and van Steen, Maarten}
   4800 }
   4818 @techreport {2007_0,
   4819         title = {Analyzing Peer Behavior in KAD},
   4820         number = {RR-07-205},
   4821         year = {2007},
   4822         month = oct,
   4823         institution = {Institut Eurecom},
   4824         type = {Tech report},
   4825         address = {Sophia Antipolis},
   4826         abstract = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about geographical distribution of peers, session times, peer availability, and peer lifetime. We also evaluated to what extent information about past peer uptime can be used to predict the remaining uptime of the peer. Peers are identified by the so-called KAD ID, which was up to now assumed to remain the same across sessions. However, we observed that this is not the case: There is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover. By tracking end-users with static IP addresses, we could measure the rate of change of KAD ID per end-user},
   4827         www_section = {distributed hash table, KAD, peer behavior},
   4830         url = {http://www.eurecom.fr/~btroup/kadtraces/},
   4831         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Analyzing\%20peer\%20behavior\%20in\%20KAD.pdf},
   4832         author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack}
   4833 }
   4834 @booklet {VenHeTon07,
   4835         title = {Anonymous Networking amidst Eavesdroppers},
   4836         year = {2007},
   4837         month = {October},
   4838         abstract = {The problem of security against packet timing based traffic analysis in wireless networks is considered in this work. An analytical measure of "anonymity" of routes in eavesdropped networks is proposed using the information-theoretic equivocation. For a physical layer with orthogonal transmitter directed signaling, scheduling and relaying techniques are designed to maximize achievable network performance for any desired level of anonymity. The network performance is measured by the total rate of packets delivered from the sources to destinations under strict latency and medium access constraints. In particular, analytical results are presented for two scenarios: For a single relay that forwards packets from m users, relaying strategies are provided that minimize the packet drops when the source nodes and the relay generate independent transmission schedules. A relay using such an independent scheduling strategy is undetectable by an eavesdropper and is referred to as a covert relay. Achievable rate regions are characterized under strict and average delay constraints on the traffic, when schedules are independent Poisson processes. For a multihop network with an arbitrary anonymity requirement, the problem of maximizing the sum-rate of flows (network throughput) is considered. A randomized selection strategy to choose covert relays as a function of the routes is designed for this purpose. Using the analytical results for a single covert relay, the strategy is optimized to obtain the maximum achievable throughput as a function of the desired level of anonymity. In particular, the throughput-anonymity relation for the proposed strategy is shown to be equivalent to an information-theoretic rate-distortion function},
   4839         www_section = {Rate-Distortion, secrecy, traffic analysis},
   4840         url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=20411836},
   4841         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0710.4903v1.pdf},
   4842         author = {Parvathinathan Venkitasubramaniam and Ting He and Lang Tong}
   4843 }
   4844 @conference {di07mass,
   4845         title = {Application of DHT-Inspired Routing for Object Tracking},
   4846         booktitle = {Proceedings of 4th IEEE International Conference on Mobile Ad-hoc and Sensor Systems},
   4847         year = {2007},
   4848         type = {publication},
   4849         address = {Pisa, Italy},
   4850         abstract = {A major problem in tracking objects in sensor networks is trading off update traffic and timeliness of the data that is available to a monitoring site. Typically, either all objects regularly update some central registry with their location information, or the monitoring instance floods the network with a request when it needs information for a particular object. More sophisticated approaches use a P2P-like distributed storage structure on top of geographic routing. The applicability of the latter is limited to certain topologies, and having separate storage and routing algorithms reduces efficiency. In this paper, we present a different solution which is based on the scalable source routing (SSR) protocol. SSR is a network layer routing protocol that has been inspired by distributed hash tables (DHT). It provides key-based routing in large networks of resource-limited devices such as sensor networks. We argue that this approach is more suitable for object tracking in sensor networks because it evenly spreads the updates over the whole network without being limited to a particular network topology. We support our argument with extensive simulations},
   4851         www_section = {distributed hash table, scalable source routing},
   4852         isbn = {978-1-4244-1454-3},
   4853         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   4854         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di07mass.pdf},
   4855         author = {Di, Pengfei and Yaser Houri and Qing Wei and J{\"o}rg Widmer and Thomas Fuhrmann}
   4856 }
   4857 @conference {Ostrovsky:2007:AEN:1315245.1315270,
   4858         title = {Attribute-based encryption with non-monotonic access structures},
   4859         booktitle = {CCS'07--Proceedings of the 14th ACM Conference on Computer and Communications Security},
   4860         series = {CCS '07},
   4861         year = {2007},
   4862         month = oct,
   4863         pages = {195--203},
   4864         publisher = {ACM},
   4865         organization = {ACM},
   4866         address = {Alexandria, VA, USA},
   4867         abstract = {We construct an Attribute-Based Encryption (ABE) scheme that allows a user's private key to be expressed in terms of any access formula over attributes. Previous ABE schemes were limited to expressing only monotonic access structures. We provide a proof of security for our scheme based on the Decisional Bilinear Diffie-Hellman (BDH) assumption. Furthermore, the performance of our new scheme compares favorably with existing, less-expressive schemes},
   4868         www_section = {ABE, BDH, Decisional bilinear diffie-hellman, encryption, non-monotonic access},
   4869         isbn = {978-1-59593-703-2},
   4870         doi = {http://doi.acm.org/10.1145/1315245.1315270},
   4871         url = {http://doi.acm.org/10.1145/1315245.1315270},
   4872         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2707\%20-\%20ABE\%20with\%20non-monotonic\%20access\%20structures.pdf},
   4874         author = {Rafail Ostrovsky and Amit Sahai and Waters, Brent}
   4875 }
   4876 @book {2007_1,
   4877         title = {B.A.T.M.A.N Status Report},
   4878         year = {2007},
   4879         abstract = {This report documents the current status of the development and implementation of the B.A.T.M.A.N (better approach to mobile ad-hoc networking) routing protocol. B.A.T.M.A.N uses a simple and robust algorithm for establishing multi-hop routes in mobile ad-hoc networks. It ensures highly adaptive and loop-free routing while causing only low processing and traffic cost},
   4880         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/batman-status.pdf},
   4881         publisher = {unknown},
   4882         author = {Axel Neumann and Corinna Elektra Aichele and Marek Lindner}
   4883 }
   4884 @conference {ccs07-blac,
   4885         title = {Blacklistable Anonymous Credentials: Blocking Misbehaving Users without TTPs},
   4886         booktitle = {Proceedings of CCS 2007},
   4887         year = {2007},
   4888         month = {October},
   4889         publisher = {ACM  New York, NY, USA},
   4890         organization = {ACM  New York, NY, USA},
   4891         abstract = {Several credential systems have been proposed in which users can authenticate to services anonymously. Since anonymity can give users the license to misbehave, some variants allow the selective deanonymization (or linking) of misbehaving users upon a complaint to a trusted third party (TTP). The ability of the TTP to revoke a user's privacy at any time, however, is too strong a punishment for misbehavior. To limit the scope of deanonymization, systems such as "e-cash" have been proposed in which users are deanonymized under only certain types of well-defined misbehavior such as "double spending." While useful in some applications, it is not possible to generalize such techniques to more subjective definitions of misbehavior.
   4892 
   4893 We present the first anonymous credential system in which services can "blacklist" misbehaving users without contacting a TTP. Since blacklisted users remain anonymous, misbehaviors can be judged subjectively without users fearing arbitrary deanonymization by a TTP},
   4894         www_section = {privacy, revocation, user misbehavior},
   4895         isbn = {978-1-59593-703-2},
   4896         doi = {10.1145/1315245.1315256},
   4897         url = {http://portal.acm.org/citation.cfm?id=1315245.1315256},
   4898         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs07-blac.pdf},
   4899         author = {Patrick P. Tsang and Man Ho Au and Apu Kapadia and Sean Smith}
   4900 }
   4901 @article {Terpstra:2007:BRP:1282427.1282387,
   4902         title = {Bubblestorm: resilient, probabilistic, and exhaustive peer-to-peer search},
   4903         journal = {SIGCOMM Computer Communication Review},
   4904         volume = {37},
   4905         year = {2007},
   4906         month = aug,
   4907         pages = {49--60},
   4908         publisher = {ACM},
   4909         address = {New York, NY, USA},
   4910         abstract = {Peer-to-peer systems promise inexpensive scalability, adaptability, and robustness. Thus, they are an attractive platform for file sharing, distributed wikis, and search engines. These applications often store weakly structured data, requiring sophisticated search algorithms. To simplify the search problem, most scalable algorithms introduce structure to the network. However, churn or violent disruption may break this structure, compromising search guarantees.
   4911 
   4912 This paper proposes a simple probabilistic search system, BubbleStorm, built on random multigraphs. Our primary contribution is a flexible and reliable strategy for performing exhaustive search. BubbleStorm also exploits the heterogeneous bandwidth of peers. However, we sacrifice some of this bandwidth for high parallelism and low latency. The provided search guarantees are tunable, with success probability adjustable well into the realm of reliable systems.
   4913 
   4914 For validation, we simulate a network with one million low-end peers and show BubbleStorm handles up to 90\% simultaneous peer departure and 50\% simultaneous crash},
   4915         www_section = {exhaustive search, peer-to-peer networking, resilience, simulation},
   4916         issn = {0146-4833},
   4917         doi = {http://doi.acm.org/10.1145/1282427.1282387},
   4918         url = {http://doi.acm.org/10.1145/1282427.1282387},
   4919         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Computers\%20Communication\%20Review\%20-\%20Bubblestorm.pdf},
   4920         author = {Terpstra, Wesley W. and Jussi Kangasharju and Leng, Christof and Buchmann, Alejandro P.}
   4921 }
   4922 @booklet {cosic-2007-001,
   4923         title = {The Byzantine Postman Problem: A Trivial Attack Against PIR-based Nym Servers},
   4924         number = {ESAT-COSIC 2007-001},
   4925         year = {2007},
   4926         month = feb,
   4927         publisher = {Katholieke Universiteit Leuven},
   4928         abstract = {Over the last several decades, there have been numerous proposals for systems which can preserve the anonymity of the recipient of some data. Some have involved trusted third-parties or trusted hardware; others have been constructed on top of link-layer anonymity systems or mix-nets. In this paper, we evaluate a pseudonymous message system which takes the different approach of using Private Information Retrieval (PIR) as its basis. We expose a flaw in the system as presented: it fails to identify Byzantine servers. We provide suggestions on correcting the flaw, while observing the security and performance trade-offs our suggestions require},
   4929         www_section = {anonymity, private information retrieval, pseudonym},
   4930         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.1013},
   4931         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cosic-2007-001.pdf},
   4932         author = {Len Sassaman and Bart Preneel}
   4933 }
   4934 @conference {1759877,
   4935         title = {CFR: a peer-to-peer collaborative file repository system},
   4936         booktitle = {GPC'07: Proceedings of the 2nd international conference on Advances in grid and pervasive computing},
   4937         year = {2007},
   4938         pages = {100--111},
   4939         publisher = {Springer-Verlag},
   4940         organization = {Springer-Verlag},
   4941         address = {Berlin, Heidelberg},
   4942         abstract = {Due to the high availability of the Internet, many large cross-organization collaboration projects, such as SourceForge, grid systems etc., have emerged. One of the fundamental requirements of these collaboration efforts is a storage system to store and exchange data. This storage system must be highly scalable and can efficiently aggregate the storage resources contributed by the participating organizations to deliver good performance for users. In this paper, we propose a storage system, Collaborative File Repository (CFR), for large scale collaboration projects. CFR uses peer-to-peer techniques to achieve scalability, efficiency, and ease of management. In CFR, storage nodes contributed by the participating organizations are partitioned according to geographical regions. Files stored in CFR are automatically replicated to all regions. Furthermore, popular files are duplicated to other storage nodes of the same region. By doing so, data transfers between users and storage nodes are confined within their regions and transfer efficiency is enhanced. Experiments show that our replication can achieve high efficiency with a small number of duplicates},
   4943         www_section = {P2P, storage},
   4944         isbn = {978-3-540-72359-2},
   4945         url = {http://portal.acm.org/citation.cfm?id=1759877\&dl=GUIDE\&coll=GUIDE$\#$},
   4946         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.7110.pdf},
   4947         author = {Lin, Meng-Ru and Lu, Ssu-Hsuan and Ho, Tsung-Hsuan and Lin, Peter and Chung, Yeh-Ching}
   4948 }
   4949 @mastersthesis {1329865,
   4950         title = {Cheat-proof event ordering for large-scale distributed multiplayer games},
   4951         year = {2007},
   4952         note = {Adviser: Virginia Lo},
   4953         school = {University of Oregon},
   4954         type = {phd},
   4955         address = {Eugene, OR, USA},
   4956         abstract = {Real-time, interactive, multi-user (RIM) applications are networked applications that allow users to collaborate and interact with each other over the Internet for work, education and training, or entertainment purposes. Multiplayer games, distance learning applications, collaborative whiteboards, immersive educational and training simulations, and distributed interactive simulations are examples of these applications. Of these RIM applications, multiplayer games are an important class for research due to their widespread deployment and popularity on the Internet. Research with multiplayer games will have a direct impact on all RIM applications.
   4957 
   4958 While large-scale multiplayer games have typically used a client/server architecture for network communication, we propose using a peer-to-peer architecture to solve the scalability problems inherent in centralized systems. Past research and actual deployments of peer-to-peer networks show that they can scale to millions of users. However, these prior peer-to-peer networks do not meet the low latency and interactive requirements that multi-player games need. Indeed, the fundamental problem of maintaining consistency between all nodes in the face of failures, delays, and malicious attacks has to be solved to make peer-to-peer networks a viable solution.
   4959 
   4960 We propose solving the consistency problem through secure and scalable event ordering. While traditional event ordering requires all-to-all message passing and at least two rounds of communication, we argue that multiplayer games lend themselves naturally to a hierarchical decomposition of their state space so that we can reduce the communication cost of event ordering. We also argue that by using cryptography, a discrete view of time, and majority voting, we can totally order events in a real-time setting. By applying these two concepts, we can scale multiplayer games to millions of players.
   4961 
   4962 We develop our solution in two parts: a cheat-proof and real-time event ordering protocol and a scalable, hierarchical structure that organizes peers in a tree according to their scope of interest in the game. Our work represents the first, complete solution to this problem and we show through both proofs and simulations that our protocols allow the creation of large-scale, peer-to-peer games that are resistant to cheating while maintaining real-time responsiveness in the system},
   4963         url = {http://portal.acm.org/citation.cfm?id=1329865$\#$},
   4964         author = {Chris GauthierDickey}
   4965 }
   4966 @article {Lee2007CISS,
   4967         title = {CISS: An efficient object clustering framework for DHT-based peer-to-peer applications},
   4968         journal = {Comput. Netw},
   4969         volume = {51},
   4970         number = {4},
   4971         year = {2007},
   4972         pages = {1072--1094},
   4973         publisher = {Elsevier North-Holland, Inc},
   4974         address = {New York, NY, USA},
   4975         www_section = {distributed hash table, load balancing, Multi-dimensional range query, Object clustering, Peer-to-peer application},
   4976         issn = {1389-1286},
   4977         doi = {10.1016/j.comnet.2006.07.005},
   4978         url = {http://dx.doi.org/10.1016/j.comnet.2006.07.005},
   4979         author = {Lee, Jinwon and Lee, Hyonik and Kang, Seungwoo and Kim, Su Myeon and Song, Junehwa}
   4980 }
   4981 @conference {conf/acsac/ADC07,
   4982         title = {Closed-Circuit Unobservable Voice Over IP},
   4983         booktitle = {Proceedings of 23rd Annual Computer Security Applications Conference (ACSAC'07), Miami, FL, USA},
   4984         year = {2007},
   4985         publisher = {IEEE Computer Society Press},
   4986         organization = {IEEE Computer Society Press},
   4987         abstract = {Among all the security issues in Voice over IP (VoIP) communications, one of the most difficult to achieve is traffic analysis resistance. Indeed, classical approaches provide a reasonable degree of security but induce large round-trip times that are incompatible with VoIP. In this paper, we describe some of the privacy and security issues derived from traffic analysis in VoIP. We also give an overview of how to provide low-latency VoIP communication with strong resistance to traffic analysis. Finally, we present a server which can provide such resistance to hundreds of users even if the server is compromised},
   4988         www_section = {latency, unobservability, VoIP},
   4989         isbn = {0-7695-3060-5},
   4990         doi = {10.1109/ACSAC.2007.34},
   4991         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/ACSAC.2007.34},
   4992         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ADC07.pdf},
   4993         author = {Carlos Aguilar Melchor and Yves Deswarte and Julien Iguchi-Cartigny}
   4994 }
   4995 @article {EdmanSY07,
   4996         title = {A Combinatorial Approach to Measuring Anonymity},
   4997         journal = {Intelligence and Security Informatics, 2007 IEEE},
   4998         year = {2007},
   4999         month = {May},
   5000         pages = {356--363},
   5001         abstract = {In this paper we define a new metric for quantifying the degree of anonymity collectively afforded to users of an anonymous communication system. We show how our metric, based on the permanent of a matrix, can be useful in evaluating the amount of information needed by an observer to reveal the communication pattern as a whole. We also show how our model can be extended to include probabilistic information learned by an attacker about possible sender-recipient relationships. Our work is intended to serve as a complementary tool to existing information-theoretic metrics, which typically consider the anonymity of the system from the perspective of a single user or message},
   5002         www_section = {anonymity},
   5003         isbn = {142441329X},
   5004         doi = {10.1109/ISI.2007.379497},
   5005         url = {http://www.mendeley.com/research/a-combinatorial-approach-to-measuring-anonymity/},
   5006         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanSY07.pdf},
   5007         author = {Matthew Edman and Fikret Sivrikaya and B{\"u}lent Yener}
   5008 }
   5009 @article {1273450,
   5010         title = {On compact routing for the internet},
   5011         journal = {SIGCOMM Comput. Commun. Rev},
   5012         volume = {37},
   5013         number = {3},
   5014         year = {2007},
   5015         pages = {41--52},
   5016         publisher = {ACM},
   5017         address = {New York, NY, USA},
   5018         abstract = {The Internet's routing system is facing stresses due to its poor fundamental scaling properties. Compact routing is a research field that studies fundamental limits of routing scalability and designs algorithms that try to meet these limits. In particular, compact routing research shows that shortest-path routing, forming a core of traditional routing algorithms, cannot guarantee routing table (RT) sizes that on all network topologies grow slower than linearly as functions of the network size. However, there are plenty of compact routing schemes that relax the shortest-path requirement and allow for improved, sublinear RT size scaling that is mathematically provable for all static network topologies. In particular, there exist compact routing schemes designed for grids, trees, and Internet-like topologies that offer RT sizes that scale logarithmically with the network size.
   5019 
   5020 In this paper, we demonstrate that in view of recent results in compact routing research, such logarithmic scaling on Internet-like topologies is fundamentally impossible in the presence of topology dynamics or topology-independent (flat) addressing. We use analytic arguments to show that the number of routing control messages per topology change cannot scale better than linearly on Internet-like topologies. We also employ simulations to confirm that logarithmic RT size scaling gets broken by topology-independent addressing, a cornerstone of popular locator-identifier split proposals aiming at improving routing scaling in the presence of network topology dynamics or host mobility. These pessimistic findings lead us to the conclusion that a fundamental re-examination of assumptions behind routing models and abstractions is needed in order to find a routing architecture that would be able to scale "indefinitely"},
   5021         www_section = {compact routing, internet routing, routing scalability},
   5022         issn = {0146-4833},
   5023         doi = {10.1145/1273445.1273450},
   5024         url = {http://portal.acm.org/citation.cfm?id=1273450$\#$},
   5025         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.5763.pdf},
   5026         author = {Krioukov, Dmitri and Fall, Kevin and Brady, Arthur}
   5027 }
   5028 @mastersthesis {2007_2,
   5029         title = {Cooperative Data Backup for Mobile Devices},
   5030         type = {phd},
   5031         year = {2007},
   5032         month = mar,
   5033         abstract = {Mobile devices such as laptops, PDAs and cell phones are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. However, few mechanisms are available to reduce the risk of losing the data stored on these devices. In this dissertation, we try to address this concern by designing a cooperative backup service for mobile devices. The service leverages encounters and spontaneous interactions among participating devices, such that each device stores data on behalf of other devices. We first provide an analytical evaluation of the dependability gains of the proposed service. Distributed storage mechanisms are explored and evaluated. Security concerns arising from the cooperation among mutually suspicious principals are identified, and core mechanisms are proposed to allow them to be addressed. Finally, we present our prototype implementation of the cooperative backup service},
   5034         www_section = {backup, dependability, P2P, ubiquitous computing},
   5035         url = {http://ethesis.inp-toulouse.fr/archive/00000544/},
   5036         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/phd-thesis.fr_en.pdf},
   5037         author = {Ludovic Court{\`e}s}
   5038 }
   5039 @conference {fessi-iptcomm2007,
   5040         title = {A cooperative SIP infrastructure for highly reliable telecommunication services},
   5041         booktitle = {IPTComm '07: Proceedings of the 1st international conference on Principles, systems and applications of IP telecommunications},
   5042         year = {2007},
   5043         pages = {29--38},
   5044         publisher = {ACM},
   5045         organization = {ACM},
   5046         address = {New York, NY, USA},
   5047         isbn = {978-1-60558-006-7},
   5048         doi = {http://doi.acm.org/10.1145/1326304.1326310},
   5049         author = {Fessi, Ali and Heiko Niedermayer and Kinkelin, Holger and Carle, Georg}
   5050 }
   5051 @conference {DBLP:conf/esorics/MalleshW07,
   5052         title = {Countering Statistical Disclosure with Receiver-Bound Cover Traffic},
   5053         booktitle = {Proceedings of ESORICS 2007, 12th European Symposium On Research In Computer Security, Dresden, Germany, September 24-26, 2007, Proceedings},
   5054         series = {Lecture Notes in Computer Science},
   5055         volume = {4734},
   5056         year = {2007},
   5057         pages = {547--562},
   5058         publisher = {Springer},
   5059         organization = {Springer},
   5060         abstract = {Anonymous communications provides an important privacy service by keeping passive eavesdroppers from linking communicating parties. However, using long-term statistical analysis of traffic sent to and from such a system, it is possible to link senders with their receivers. Cover traffic is an effective, but somewhat limited, counter strategy against this attack. Earlier work in this area proposes that privacy-sensitive users generate and send cover traffic to the system. However, users are not online all the time and cannot be expected to send consistent levels of cover traffic, drastically reducing the impact of cover traffic. We propose that the mix generate cover traffic that mimics the sending patterns of users in the system. This receiver-bound cover helps to make up for users that aren't there, confusing the attacker. We show through simulation how this makes it difficult for an attacker to discern cover from real traffic and perform attacks based on statistical analysis. Our results show that receiver-bound cover substantially increases the time required for these attacks to succeed. When our approach is used in combination with user-generated cover traffic, the attack takes a very long time to succeed},
   5061         www_section = {anonymity, cover traffic, privacy},
   5062         isbn = {978-3-540-74834-2},
   5063         doi = {10.1007/978-3-540-74835-9},
   5064         url = {http://www.springerlink.com/content/k2146538700m71v7/},
   5065         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MalleshW07.pdf},
   5066         author = {Nayantara Mallesh and Matthew Wright},
   5067         editor = {Joachim Biskup and Javier Lopez}
   5068 }
   5069 @mastersthesis {steven-thesis,
   5070         title = {Covert channel vulnerabilities in anonymity systems},
   5071         year = {2007},
   5072         month = {December},
   5073         school = {University of Cambridge},
   5074         type = {phd},
   5075         abstract = {The spread of wide-scale Internet surveillance has spurred interest in anonymity systems that protect users' privacy by restricting unauthorised access to their identity. This requirement can be considered as a flow control policy in the well established field of multilevel secure systems. I apply previous research on covert channels (unintended means to communicate in violation of a security policy) to analyse several anonymity systems in an innovative way.
   5076 One application for anonymity systems is to prevent collusion in competitions. I show how covert channels may be exploited to violate these protections and construct defences against such attacks, drawing from previous covert channel research and collusion-resistant voting systems.
   5077 In the military context, for which multilevel secure systems were designed, covert channels are increasingly eliminated by physical separation of interconnected single-role computers. Prior work on the remaining network covert channels has been solely based on protocol specifications. I examine some protocol implementations and show how the use of several covert channels can be detected and how channels can be modified to resist detection.
   5078 I show how side channels (unintended information leakage) in anonymity networks may reveal the behaviour of users. While drawing on previous research on traffic analysis and covert channels, I avoid the traditional assumption of an omnipotent adversary. Rather, these attacks are feasible for an attacker with limited access to the network. The effectiveness of these techniques is demonstrated by experiments on a deployed anonymity network, Tor.
   5079 Finally, I introduce novel covert and side channels which exploit thermal effects. Changes in temperature can be remotely induced through CPU load and measured by their effects on crystal clock skew. Experiments show this to be an effective attack against Tor. This side channel may also be usable for geolocation and, as a covert channel, can cross supposedly infallible air-gap security boundaries.
   5080 This thesis demonstrates how theoretical models and generic methodologies relating to covert channels may be applied to find practical solutions to problems in real-world anonymity systems. These findings confirm the existing hypothesis that covert channel analysis, vulnerabilities and defences developed for multilevel secure systems apply equally well to anonymity systems},
   5081         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.62.5142},
   5082         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steven-thesis.pdf},
   5083         author = {Steven J. Murdoch}
   5084 }
   5085 @conference {ccs07-doa,
   5086         title = {Denial of Service or Denial of Security? How Attacks on Reliability can Compromise Anonymity},
   5087         booktitle = {Proceedings of CCS 2007},
   5088         year = {2007},
   5089         month = {October},
   5090         publisher = {ACM  New York, NY, USA},
   5091         organization = {ACM  New York, NY, USA},
   5092         abstract = {We consider the effect attackers who disrupt anonymous communications have on the security of traditional high- and low-latency anonymous communication systems, as well as on the Hydra-Onion and Cashmere systems that aim to offer reliable mixing, and Salsa, a peer-to-peer anonymous communication network. We show that denial of service (DoS) lowers anonymity as messages need to get retransmitted to be delivered, presenting more opportunities for attack. We uncover a fundamental limit on the security of mix networks, showing that they cannot tolerate a majority of nodes being malicious. Cashmere, Hydra-Onion, and Salsa security is also badly affected by DoS attackers. Our results are backed by probabilistic modeling and extensive simulations and are of direct applicability to deployed anonymity systems},
   5093         www_section = {anonymity, attack, denial-of-service, reliability},
   5094         isbn = {978-1-59593-703-2},
   5095         doi = {10.1145/1315245.1315258},
   5096         url = {http://portal.acm.org/citation.cfm?id=1315258},
   5097         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs07-doa.pdf},
   5098         author = {Borisov, Nikita and George Danezis and Prateek Mittal and Parisa Tabriz}
   5099 }
   5100 @conference {1345798,
   5101         title = {Dependability Evaluation of Cooperative Backup Strategies for Mobile Devices},
   5102         booktitle = {PRDC '07: Proceedings of the 13th Pacific Rim International Symposium on Dependable Computing},
   5103         year = {2007},
   5104         pages = {139--146},
   5105         publisher = {IEEE Computer Society},
   5106         organization = {IEEE Computer Society},
   5107         address = {Washington, DC, USA},
   5108         abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. This paper discusses the dependability evaluation of a cooperative backup service for mobile devices. Participating devices leverage encounters with other devices to temporarily replicate critical data. Permanent backups are created when the participating devices are able to access the fixed infrastructure. Several data replication and scattering strategies are presented, including the use of erasure codes. A methodology to model and evaluate them using Petri nets and Markov chains is described. We demonstrate that our cooperative backup service decreases the probability of data loss by a factor up to the ad hoc to Internet connectivity ratio},
   5109         isbn = {0-7695-3054-0},
   5110         doi = {10.1109/PRDC.2007.29},
   5111         url = {http://portal.acm.org/citation.cfm?id=1345534.1345798$\#$},
   5112         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.8269_0.pdf},
   5113         author = {Ludovic Court{\`e}s and Hamouda, Ossama and Kaaniche, Mohamed and Killijian, Marc-Olivier and Powell, David}
   5114 }
   5115 @conference {wiangsripanawan-acsw07,
   5116         title = {Design principles for low latency anonymous network systems secure against timing attacks},
   5117         booktitle = {Proceedings of the fifth Australasian symposium on ACSW frontiers (ACSW '07)},
   5118         year = {2007},
   5119         pages = {183--191},
   5120         publisher = {Australian Computer Society, Inc},
   5121         organization = {Australian Computer Society, Inc},
   5122         address = {Darlinghurst, Australia, Australia},
   5123         abstract = {Low latency anonymous network systems, such as Tor, were considered secure against timing attacks when the threat model does not include a global adversary. In this threat model the adversary can only see part of the links in the system. In a recent paper entitled "Low-cost traffic analysis of Tor", it was shown that a variant of timing attack that does not require a global adversary can be applied to Tor. More importantly, the authors claimed that their attack would work on any low latency anonymous network systems. The implication of the attack is that all low latency anonymous networks will be vulnerable to this attack even if there is no global adversary.
   5124 
   5125 In this paper, we investigate this claim against other low latency anonymous networks, including Tarzan and Morphmix. Our results show that in contrast to the claim of the aforementioned paper, the attack may not be applicable in all cases. Based on our analysis, we draw design principles for secure low latency anonymous network system (also secure against the above attack)},
   5126         www_section = {anonymity, latency, Morphmix, Tarzan, timing attack, Tor},
   5127         isbn = {1-920-68285-X},
   5128         url = {http://portal.acm.org/citation.cfm?id=1274553},
   5129         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wiangsripanawan-acsw07.pdf},
   5130         author = {Rungrat Wiangsripanawan and Willy Susilo and Rei Safavi-Naini}
   5131 }
   5148 @conference {Piatek:2007:IBR:1973430.1973431,
   5149         title = {Do incentives build robustness in BitTorrent?},
   5150         booktitle = {NSDI'07. Proceedings of the 4th USENIX Conference on Networked Systems Design and Implementation},
   5151         series = {NSDI'07},
   5152         year = {2007},
   5153         month = apr,
   5154         pages = {1--1},
   5155         publisher = {USENIX Association},
   5156         organization = {USENIX Association},
   5157         address = {Cambridge, MA, USA},
   5158         abstract = {A fundamental problem with many peer-to-peer systems is the tendency for users to "free ride"--to consume resources without contributing to the system. The popular file distribution tool BitTorrent was explicitly designed to address this problem, using a tit-for-tat reciprocity strategy to provide positive incentives for nodes to contribute resources to the swarm. While BitTorrent has been extremely successful, we show that its incentive mechanism is not robust to strategic clients. Through performance modeling parameterized by real world traces, we demonstrate that all peers contribute resources that do not directly improve their performance. We use these results to drive the design and implementation of BitTyrant, a strategic BitTorrent client that provides a median 70\% performance gain for a 1 Mbit client on live Internet swarms. We further show that when applied universally, strategic clients can hurt average per-swarm performance compared to today's BitTorrent client implementations},
   5159         url = {http://dl.acm.org/citation.cfm?id=1973430.1973431},
   5160         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2707\%20-\%20Do\%20incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
   5161         author = {Piatek, Michael and Isdal, Tomas and Anderson, Thomas and Krishnamurthy, Arvind and Venkataramani, Arun}
   5162 }
   5163 @conference {diaz-wpes2007,
   5164         title = {Does additional information always reduce anonymity?},
   5165         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society 2007},
   5166         year = {2007},
   5167         month = {October},
   5168         pages = {72--75},
   5169         publisher = {ACM  New York, NY, USA},
   5170         organization = {ACM  New York, NY, USA},
   5171         address = {Alexandria,VA,USA},
   5172         abstract = {We discuss information-theoretic anonymity metrics that use entropy over the distribution of all possible recipients to quantify anonymity. We identify a common misconception: the entropy of the distribution describing the potential receivers does not always decrease given more information. We show the relation of these a-posteriori distributions with the Shannon conditional entropy, which is an average over all possible observations},
   5173         www_section = {anonymity measurement, entropy, mix, user profiles},
   5174         isbn = {978-1-59593-883-1},
   5175         doi = {10.1145/1314333.1314347},
   5176         url = {http://portal.acm.org/citation.cfm?id=1314333.1314347},
   5177         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/diaz-wpes2007.pdf},
   5178         author = {Claudia Diaz and Carmela Troncoso and George Danezis},
   5179         editor = {Ting Yu}
   5180 }
   5181 @booklet {Landsiedel_dynamicmultipath,
   5182         title = {Dynamic Multipath Onion Routing in Anonymous Peer-To-Peer Overlay Networks},
   5183         year = {2007},
   5184         abstract = {Although recent years provided many protocols for anonymous routing in overlay networks, they commonly rely on the same communication paradigm: Onion Routing. In Onion Routing a static tunnel through an overlay network is built via layered encryption. All traffic exchanged by its end points is relayed through this tunnel. In contrast, this paper introduces dynamic multipath Onion Routing to extend the static Onion Routing paradigm. This approach allows each packet exchanged between two end points to travel along a different path. To provide anonymity the first half of this path is selected by the sender and the second half by the receiver of the packet. The results are manifold: First, dynamic multipath Onion Routing increases the resilience against threats, especially pattern and timing based analysis attacks. Second, the dynamic paths reduce the impact of misbehaving and overloaded relays. Finally, inspired by Internet routing, the forwarding nodes do not need to maintain any state about ongoing flows and so reduce the complexity of the router. In this paper, we describe the design of our dynamic Multipath Onion RoutEr (MORE) for peer-to-peer overlay networks, and evaluate its performance. Furthermore, we integrate address virtualization to abstract from Internet addresses and provide transparent support for IP applications. Thus, no application-level gateways, proxies or modifications of applications are required to sanitize protocols from network level information. Acting as an IP-datagram service, our scheme provides a substrate for anonymous communication to a wide range of applications using TCP and UDP},
   5185         www_section = {onion routing, overlay networks, P2P},
   5186         isbn = {978-1-4244-1043-9 },
   5187         url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4410909\%2F4410910\%2F04410930.pdf\%3Farnumber\%3D4410930\&authDecision=-203},
   5188         author = {Olaf Landsiedel and Alexis Pimenidis and Klaus Wehrle}
   5189 }
   5190 @conference {Coulom:2006:ESB:1777826.1777833,
   5191         title = {Efficient selectivity and backup operators in Monte-Carlo tree search},
   5192         booktitle = {CG'06--Proceedings of the 5th international conference on Computers and games},
   5193         series = {CG'06},
   5194         year = {2007},
   5195         pages = {72--83},
   5196         publisher = {Springer-Verlag},
   5197         organization = {Springer-Verlag},
   5198         address = {Turin, Italy},
   5199         abstract = {A Monte-Carlo evaluation consists in estimating a position by averaging the outcome of several random continuations. The method can serve as an evaluation function at the leaves of a min-max tree. This paper presents a new framework to combine tree search with Monte-Carlo evaluation, that does not separate between a min-max phase and a Monte-Carlo phase. Instead of backing-up the min-max value close to the root, and the average value at some depth, a more general backup operator is defined that progressively changes from averaging to minmax as the number of simulations grows. This approach provides a fine-grained control of the tree growth, at the level of individual simulations, and allows efficient selectivity. The resulting algorithm was implemented in a 9 {\texttimes} 9 Go-playing program, Crazy Stone, that won the 10th KGS computer-Go tournament},
   5200         www_section = {framework, MCTS, Monte-Carlo Tree Search},
   5201         isbn = {3-540-75537-3, 978-3-540-75537-1},
   5202         url = {http://dl.acm.org/citation.cfm?id=1777826.1777833},
   5203         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CG\%2706\%20-\%20Selectivity\%20and\%20backup\%20operators\%20in\%20MCTS.pdf},
   5204         author = {Coulom, R{\'e}mi}
   5205 }
   5206 @article {Machanavajjhala2007,
   5207         title = {$\ell$-diversity: Privacy beyond k-anonymity},
   5208         journal = {ACM Transactions on Knowledge Discovery from Data (TKDD)},
   5209         volume = {1},
   5210         number = {1},
   5211         year = {2007},
   5212         author = {Ashwin Machanavajjhala and Daniel Kifer and Johannes Gehrke and Muthuramakrishnan Venkitasubramaniam}
   5213 }
   5214 @conference {Tang:2007:ESE:1260204.1260647,
   5215         title = {Empirical Study on the Evolution of PlanetLab},
   5216         booktitle = {ICN'07--Proceedings of the 6th International Conference on Networking},
   5217         year = {2007},
   5218         month = apr,
   5219         pages = {0--64},
   5220         publisher = {IEEE Computer Society},
   5221         organization = {IEEE Computer Society},
   5222         address = {Sainte-Luce, Martinique, France},
   5223         abstract = {PlanetLab is a globally distributed overlay platform that has been increasingly used by researchers to deploy and assess planetary-scale network services. This paper analyzes some particular advantages of PlanetLab, and then investigates its evolution process, geographical node-distribution, and network topological features. The revealed results are helpful for researchers to 1) understand the history of PlanetLab and some of its important properties quantitatively; 2) realize the dynamics of the PlanetLab environment and design professional experiments; 3) select stable nodes that possess a high probability to run continuously for a long time; and 4) objectively and in depth evaluate the experimental results},
   5224         www_section = {overlay, PlanetLab, topology},
   5225         isbn = {0-7695-2805-8},
   5226         doi = {10.1109/ICN.2007.40},
   5227         url = {http://dl.acm.org/citation.cfm?id=1260204.1260647},
   5228         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICN\%2707\%20-\%20PlanetLab.pdf},
   5229         author = {Tang, Li and Chen, Yin and Li, Fei and Zhang, Hui and Li, Jun}
   5230 }
   5231 @article {Member_enablingadaptive,
   5232         title = {Enabling Adaptive Video Streaming in P2P Systems},
   5233         journal = {IEEE Communications Magazine},
   5234         volume = {45},
   5235         year = {2007},
   5236         pages = {108--114 },
   5237         abstract = {Peer-to-peer (P2P) systems are becoming increasingly popular due to their ability to deliver large amounts of data at a reduced deployment cost. In addition to fostering the development of novel media applications, P2P systems also represent an interesting alternative paradigm for media streaming applications that can benefit from the inherent self organization and resource scalability available in such environments. This article presents an overview of application and network layer mechanisms that enable successful streaming frameworks in peer-to-peer systems. We describe media delivery architectures that can be deployed over P2P networks to address the specific requirements of streaming applications. In particular, we show how video-streaming applications can benefit from the diversity offered by P2P systems and implement distributed-streaming and scheduling solutions with multi-path packet transmission},
   5238         www_section = {distributed packet scheduling, flexible media encoding, path diversity, peer-to-peer networking},
   5239         issn = {0163-6804},
   5240         doi = {10.1109/MCOM.2007.374427  },
   5241         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Communications\%20Magazine\%20-\%20Video\%20Streaming\%20in\%20P2P\%20Systems.pdf},
   5242         author = {Dan Jurca and Jacob Chakareski and Jean-Paul Wagner and Pascal Frossard}
   5243 }
   5244 @conference {Stathopoulos07end-to-endrouting,
   5245         title = {End-to-end routing for dual-radio sensor networks},
   5246         booktitle = {Proceedings of IEEE INFOCOM 2007},
   5247         year = {2007},
   5248         pages = {2252--2260},
   5249         abstract = {Dual-radio, dual-processor nodes are an emerging class of Wireless Sensor Network devices that provide both low-energy operation as well as substantially increased computational performance and communication bandwidth for applications. In such systems, the secondary radio and processor operates with sufficiently low power that it may remain always vigilant, while the main processor and primary, high-bandwidth radio remain off until triggered by the application. By exploiting the high energy efficiency of the main processor and primary radio along with proper usage, net operating energy benefits are enabled for applications. The secondary radio provides a constantly available multi-hop network, while paths in the primary network exist only when required. This paper describes a topology control mechanism for establishing an end-to-end path in a network of dual-radio nodes using the secondary radios as a control channel to selectively wake up nodes along the required end-to-end path. Using numerical models as well as testbed experimentation, we show that our proposed mechanism provides significant energy savings of more than 60\% compared to alternative approaches, and that it incurs only moderately greater application latency},
   5250         www_section = {routing, wireless sensor network},
   5251         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.87.8984},
   5252         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Stathopoulos07a.pdf},
   5253         author = {Thanos Stathopoulos and Heidemann, John and Martin Lukac and Deborah Estrin and William J. Kaiser}
   5254 }
   5255 @conference {Binzenhofer:2007:ECS:1769187.1769257,
   5256         title = {Estimating churn in structured P2P networks},
   5257         booktitle = {ITC-20'07--Proceedings of the 20th International Teletraffic Conference on Managing Traffic Performance in Converged Networks},
   5258         series = {ITC20'07},
   5259         year = {2007},
   5260         month = jun,
   5261         pages = {630--641},
   5262         publisher = {Springer-Verlag},
   5263         organization = {Springer-Verlag},
   5264         address = {Ottawa, Canada},
   5265         abstract = {In structured peer-to-peer (P2P) networks participating peers can join or leave the system at arbitrary times, a process which is known as churn. Many recent studies revealed that churn is one of the main problems faced by any Distributed Hash Table (DHT). In this paper we discuss different possibilities of how to estimate the current churn rate in the system. In particular, we show how to obtain a robust estimate which is independent of the implementation details of the DHT. We also investigate the trade-offs between accuracy, overhead, and responsiveness to changes},
   5266         www_section = {churn, distributed hash table, P2P, peer-to-peer networking},
   5267         isbn = {978-3-540-72989-1},
   5268         url = {http://dl.acm.org/citation.cfm?id=1769187.1769257},
   5269         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ITC-20\%2707\%20-\%20Estimating\%20churn\%20in\%20structured\%20p2p\%20networks.pdf},
   5270         author = {Binzenh{\"o}fer, Andreas and Leibnitz, Kenji}
   5271 }
   5272 @article {10.1109/WOWMOM.2007.4351805,
   5273         title = {A Game Theoretic Model of a Protocol for Data Possession Verification},
   5274         journal = {A World of Wireless, Mobile and Multimedia Networks, International Symposium on},
   5275         year = {2007},
   5276         pages = {1--6},
   5277         publisher = {IEEE Computer Society},
   5278         address = {Los Alamitos, CA, USA},
   5279         abstract = {This paper discusses how to model a protocol for the verification of data possession intended to secure a peer-to-peer storage application. The verification protocol is a primitive for storage assessment, and indirectly motivates nodes to behave cooperatively within the application. The capability of the protocol to enforce cooperation between a data holder and a data owner is proved theoretically by modeling the verification protocol as a Bayesian game, and demonstrating that the solution of the game is an equilibrium where both parties are cooperative},
   5280         www_section = {P2P},
   5281         isbn = {978-1-4244-0992-1},
   5282         doi = {10.1109/WOWMOM.2007.4351805},
   5283         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/WOWMOM.2007.4351805},
   5284         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oualno-070618.pdf},
   5285         author = {Nouha Oualha and Pietro Michiardi and Yves Roudier}
   5286 }
   5287 @conference {AthanRAM07,
   5288         title = {GAS: Overloading a File Sharing Network as an Anonymizing System},
   5289         booktitle = {Proceedings of Second International Workshop on Security, (IWSEC 2007)},
   5290         year = {2007},
   5291         publisher = {Springer Berlin / Heidelberg},
   5292         organization = {Springer Berlin / Heidelberg},
   5293         abstract = {Anonymity is considered a valuable property as far as everyday transactions in the Internet are concerned. Users care about their privacy and seek new ways to keep as much of their personal information as possible secret from third parties. Anonymizing systems exist nowadays that provide users with the technology, which is able to hide their origin when they use applications such as the World Wide Web or Instant Messaging. However, all these systems are vulnerable to a number of attacks and some of them may collapse under a low strength adversary. In this paper we explore anonymity from a different perspective. Instead of building a new anonymizing system, we try to overload an existing file sharing system, Gnutella, and use it for a different purpose. We develop a technique that transforms Gnutella as an Anonymizing System (GAS) for a single download from the World Wide Web},
   5294         www_section = {anonymity, Gnutella},
   5295         isbn = {978-3-540-75650-7},
   5296         doi = {10.1007/978-3-540-75651-4},
   5297         url = {http://www.springerlink.com/content/8120788t0l354vj6/},
   5298         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AthanRAM07.pdf},
   5299         author = {Elias Athanasopoulos and Mema Roussopoulos and Kostas G. Anagnostakis and Evangelos P. Markatos}
   5300 }
   5301 @conference {Steiner:2007:GVK:1298306.1298323,
   5302         title = {A global view of KAD},
   5303         booktitle = {IMC'07--Proceedings of the 7th ACM SIGCOMM Conference on Internet Measurement},
   5304         series = {IMC '07},
   5305         year = {2007},
   5306         month = oct,
   5307         pages = {117--122},
   5308         publisher = {ACM},
   5309         organization = {ACM},
   5310         address = {San Diego, CA, USA},
   5311         abstract = {Distributed hash tables (DHTs) have been actively studied in the literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about the total number of peers online and their geographical distribution.
   5312 
   5313 Peers are identified by the so called KAD ID, which was up to now assumed to remain the same across sessions. However, we observed that this is not the case: There is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover},
   5314         www_section = {distributed hash table, lookup, peer-to-peer networking},
   5315         isbn = {978-1-59593-908-1},
   5316         doi = {http://doi.acm.org/10.1145/1298306.1298323},
   5317         url = {http://doi.acm.org/10.1145/1298306.1298323},
   5318         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2707\%20-\%20A\%20global\%20view\%20of\%20KAD.pdf},
   5319         author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack}
   5320 }
   5321 @article {2007_3,
   5322         title = {Gossip-based Peer Sampling},
   5323         journal = {ACM Trans. Comput. Syst},
   5324         volume = {25},
   5325         year = {2007},
   5326         abstract = {Gossip-based communication protocols are appealing in large-scale distributed applications such as information dissemination, aggregation, and overlay topology management. This paper factors out a fundamental mechanism at the heart of all these protocols: the peer-sampling service. In short, this service provides every node with peers to gossip with. We promote this service to the level of a first-class abstraction of a large-scale distributed system, similar to a name service being a first-class abstraction of a local-area system. We present a generic framework to implement a peer-sampling service in a decentralized manner by constructing and maintaining dynamic unstructured overlays through gossiping membership information itself. Our framework generalizes existing approaches and makes it easy to discover new ones. We use this framework to empirically explore and compare several implementations of the peer sampling service. Through extensive simulation experiments we show that---although all protocols provide a good quality uniform random stream of peers to each node locally---traditional theoretical assumptions about the randomness of the unstructured overlays as a whole do not hold in any of the instances. We also show that different design decisions result in severe differences from the point of view of two crucial aspects: load balancing and fault tolerance. Our simulations are validated by means of a wide-area implementation},
   5327         www_section = {epidemic protocols, Gossip-based protocols, peer sampling service},
   5328         issn = {0734-2071},
   5329         doi = {10.1145/1275517.1275520},
   5330         url = {http://doi.acm.org/10.1145/1275517.1275520},
   5331         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GossipPeerSampling2007Jelasity.pdf},
   5332         author = {Jelasity, M{\'a}rk and Voulgaris, Spyros and Guerraoui, Rachid and Kermarrec, Anne-Marie and van Steen, Maarten}
   5333 }
   5334 @article {2007_4,
   5335         title = {Gossiping in Distributed Systems},
   5336         journal = {SIGOPS Oper. Syst. Rev},
   5337         volume = {41},
   5338         year = {2007},
   5339         pages = {2--7},
   5340         abstract = {Gossip-based algorithms were first introduced for reliably disseminating data in large-scale distributed systems. However, their simplicity, robustness, and flexibility make them attractive for more than just pure data dissemination alone. In particular, gossiping has been applied to data aggregation, overlay maintenance, and resource allocation. Gossiping applications more or less fit the same framework, with often subtle differences in algorithmic details determining divergent emergent behavior. This divergence is often difficult to understand, as formal models have yet to be developed that can capture the full design space of gossiping solutions. In this paper, we present a brief introduction to the field of gossiping in distributed systems, by providing a simple framework and using that framework to describe solutions for various application domains},
   5341         issn = {0163-5980},
   5342         doi = {10.1145/1317379.1317381},
   5343         url = {http://doi.acm.org/10.1145/1317379.1317381},
   5344         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Gossiping2007Kermarrrec.pdf},
   5345         author = {Kermarrec, Anne-Marie and van Steen, Maarten}
   5346 }
   5347 @article { feldman:hidden-action,
   5348         title = {Hidden-Action in Network Routing},
   5349         journal = {IEEE Journal on Selected Areas in Communications},
   5350         volume = {25},
   5351         year = {2007},
   5352         month = aug,
   5353         pages = {1161--1172},
   5354         abstract = {In communication networks, such as the Internet or mobile ad-hoc networks, the actions taken by intermediate nodes or links are typically hidden from the communicating endpoints; all the endpoints can observe is whether or not the end-to-end transmission was successful. Therefore, in the absence of incentives to the contrary, rational (i.e., selfish) intermediaries may choose to forward messages at a low priority or simply not forward messages at all. Using a principal-agent model, we show how the hidden-action problem can be overcome through appropriate design of contracts in both the direct (the endpoints contract with each individual router directly) and the recursive (each router contracts with the next downstream router) cases. We further show that, depending on the network topology, per-hop or per-path monitoring may not necessarily improve the utility of the principal or the social welfare of the system},
   5355         www_section = {action, communication network, hidden action, network routing},
   5356         issn = {0733-8716 },
   5357         doi = {10.1109/JSAC.2007.070810},
   5358         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Journal\%20\%2825\%29\%20-\%20Hidden-action\%20in\%20network\%20routing.pdf},
   5359         author = {Michal Feldman and John Chuang and Ion Stoica and S Shenker}
   5360 }
   5361 @article {so64132,
   5362         title = {How robust are gossip-based communication protocols?},
   5363         journal = {Operating Systems Review},
   5364         volume = {41},
   5365         number = {5},
   5366         year = {2007},
   5367         month = {October},
   5368         pages = {14--18},
   5369         publisher = {ACM},
   5370         abstract = {Gossip-based communication protocols are often touted as being robust. Not surprisingly, such a claim relies on assumptions under which gossip protocols are supposed to operate. In this paper, we discuss and in some cases expose some of these assumptions and discuss how sensitive the robustness of gossip is to these assumptions. This analysis gives rise to a collection of new research challenges},
   5371         www_section = {robustness},
   5372         issn = {0163-5980},
   5373         doi = {10.1145/1317379.1317383},
   5374         url = {http://doc.utwente.nl/64132/},
   5375         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/robustgossip-final.pdf},
   5376         author = {Lorenzo Alvisi and Jeroen Doumen and Rachid Guerraoui and Boris Koldehofe and Harry Li and Robbert Van Renesse and Gilles Tredan}
   5377 }
   5378 @conference {adida07,
   5379         title = {How to Shuffle in Public},
   5380         booktitle = {Proceedings of the Theory of Cryptography 2007},
   5381         year = {2007},
   5382         month = feb,
   5383         publisher = {Springer Berlin / Heidelberg},
   5384         organization = {Springer Berlin / Heidelberg},
   5385         abstract = {We show how to obfuscate a secret shuffle of ciphertexts: shuffling becomes a public operation. Given a trusted party that samples and obfuscates a shuffle before any ciphertexts are received, this reduces the problem of constructing a mix-net to verifiable joint decryption.
   5386 We construct public-key obfuscations of a decryption shuffle based on the Boneh-Goh-Nissim (BGN) cryptosystem and a re-encryption shuffle based on the Paillier cryptosystem. Both allow efficient distributed verifiable decryption.
   5387 Finally, we give a distributed protocol for sampling and obfuscating each of the above shuffles and show how it can be used in a trivial way to construct a universally composable mix-net. Our constructions are practical when the number of senders N is small, yet large enough to handle a number of practical cases, e.g. N = 350 in the BGN case and N = 2000 in the Paillier case},
   5388         www_section = {public key cryptography, re-encryption},
   5389         isbn = {978-3-540-70935-0},
   5390         doi = {10.1007/978-3-540-70936-7},
   5391         url = {http://www.springerlink.com/content/j6p730488x602r28/},
   5392         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/adida07.pdf},
   5393         author = {Ben Adida and Douglas Wikstr{\"o}m}
   5394 }
   5395 @conference {Delerablee:2007:IBE:1781454.1781471,
   5396         title = {Identity-based broadcast encryption with constant size ciphertexts and private keys},
   5397         booktitle = {ASIACRYPT 2007--Proceedings of the Advances in Cryptology 13th International Conference on Theory and Application of Cryptology and Information Security},
   5398         series = {ASIACRYPT'07},
   5399         year = {2007},
   5400         month = dec,
   5401         pages = {200--215},
   5402         publisher = {Springer-Verlag},
   5403         organization = {Springer-Verlag},
   5404         address = {Kuching, Malaysia},
   5405         abstract = {This paper describes the first identity-based broadcast encryption scheme (IBBE) with constant size ciphertexts and private keys. In our scheme, the public key is of size linear in the maximal size m of the set of receivers, which is smaller than the number of possible users (identities) in the system. Compared with a recent broadcast encryption system introduced by Boneh, Gentry and Waters (BGW), our system has comparable properties, but with a better efficiency: the public key is shorter than in BGW. Moreover, the total number of possible users in the system does not have to be fixed in the setup},
   5406         www_section = {ciphertext, encryption, IBBE, private key},
   5407         isbn = {3-540-76899-8, 978-3-540-76899-9},
   5408         url = {http://dl.acm.org/citation.cfm?id=1781454.1781471},
   5409         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASIACRYPT\%2707\%20-\%20IBBE\%20with\%20constant\%20size\%20ciphertexts\%20and\%20private\%20keys.pdf},
   5410         author = {Delerabl{\'e}e, C{\'e}cile}
   5411 }
   5412 @article {KongHG07,
   5413         title = {An Identity-Free and On-Demand Routing Scheme against Anonymity Threats in Mobile Ad Hoc Networks},
   5414         journal = {IEEE Transactions on Mobile Computing},
   5415         volume = {6},
   5416         number = {8},
   5417         year = {2007},
   5418         pages = {888--902},
   5419         publisher = {IEEE Computer Society},
   5420         address = {Los Alamitos, CA, USA},
   5421         abstract = {Introducing node mobility into the network also introduces new anonymity threats. This important change of the concept of anonymity has recently attracted attention in mobile wireless security research. This paper presents identity-free routing and on-demand routing as two design principles of anonymous routing in mobile ad hoc networks. We devise ANODR (ANonymous On-Demand Routing) as the needed anonymous routing scheme that is compliant with the design principles. Our security analysis and simulation study verify the effectiveness and efficiency of ANODR},
   5422         www_section = {ad-hoc networks, anonymity, identity-free routing, neighborhood management, network complexity theory},
   5423         issn = {1536-1233},
   5424         doi = {10.1109/TMC.2007.1021},
   5425         url = {http://portal.acm.org/citation.cfm?id=1272127},
   5426         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/KongHG07.pdf},
   5427         author = {Jiejun Kong and Xiaoyan Hong and Mario Gerla}
   5428 }
   5429 @conference {Infocom2007-SNS,
   5430         title = {Implications of Selfish Neighbor Selection in Overlay Networks},
   5431         booktitle = {Proceedings of IEEE INFOCOM 2007},
   5432         year = {2007},
   5433         month = may,
   5434         address = {Anchorage, AK},
   5435         www_section = {EGOIST, game theory, routing},
   5436         url = {http://www.cs.bu.edu/techreports/pdf/2006-019-selfish-neighbor-selection.pdf},
   5437         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2007-sns.pdf},
   5438         author = {Nikolaos Laoutaris and Georgios Smaragdakis and Azer Bestavros and Byers, John W.}
   5439 }
   5440 @conference {overlier-pet2007,
   5441         title = {Improving Efficiency and Simplicity of Tor circuit establishment and hidden services},
   5442         booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET 2007)},
   5443         year = {2007},
   5444         month = {June},
   5445         publisher = {Springer},
   5446         organization = {Springer},
   5447         address = {Ottawa, Canada},
   5448         abstract = {In this paper we demonstrate how to reduce the overhead and delay of circuit establishment in the Tor anonymizing network by using predistributed Diffie-Hellman values. We eliminate the use of RSA encryption and decryption from circuit setup, and we reduce the number of DH exponentiations vs. the current Tor circuit setup protocol while maintaining immediate forward secrecy. We also describe savings that can be obtained by precomputing during idle cycles values that can be determined before the protocol starts. We introduce the distinction of eventual vs. immediate forward secrecy and present protocols that illustrate the distinction. These protocols are even more efficient in communication and computation than the one we primarily propose, but they provide only eventual forward secrecy. We describe how to reduce the overhead and the complexity of hidden server connections by using our DH-values to implement valet nodes and eliminate the need for rendezvous points as they exist today. We also discuss the security of the new elements and an analysis of efficiency improvements},
   5449         www_section = {public key cryptography},
   5450         doi = {10.1007/978-3-540-75551-7},
   5451         url = {http://www.springerlink.com/content/j68v312681l8v874/},
   5452         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/overlier-pet2007.pdf},
   5453         author = {Lasse {\O}verlier and Paul Syverson},
   5454         editor = {Borisov, Nikita and Philippe Golle}
   5455 }
   5456 @article {1290327,
   5457         title = {On improving the efficiency of truthful routing in MANETs with selfish nodes},
   5458         journal = {Pervasive Mob. Comput},
   5459         volume = {3},
   5460         number = {5},
   5461         year = {2007},
   5462         pages = {537--559},
   5463         publisher = {Elsevier Science Publishers B. V},
   5464         address = {Amsterdam, The Netherlands, The Netherlands},
   5465         abstract = {In Mobile Ad Hoc Networks (MANETs), nodes depend upon each other for routing and forwarding packets. However, nodes belonging to independent authorities in MANETs may behave selfishly and may not forward packets to save battery and other resources. To stimulate cooperation, nodes are rewarded for their forwarding service. Since nodes spend different cost to forward packets, it is desirable to reimburse nodes according to their cost so that nodes get incentive while the least total payment is charged to the sender. However, to maximize their utility, nodes may tell lie about their cost. This poses the requirement of truthful protocols, which maximizes the utility of nodes only when they declare their true cost. Anderegg and Eidenbenz recently proposed a truthful routing protocol, named ad hoc-VCG. This protocol incurs the route discovery overhead of O(n3), where n is the number of nodes in the network. This routing overhead is likely to become prohibitively large as the network size grows. Moreover, it leads to low network performance due to congestion and interference. We present a low-overhead truthful routing protocol for route discovery in MANETs with selfish nodes by applying mechanism design. The protocol, named LOTTO (Low Overhead Truthful rouTing prOtocol), finds a least cost path for data forwarding with a lower routing overhead of O(n2). We conduct an extensive simulation study to evaluate the performance of our protocol and compare it with ad hoc-VCG. Simulation results show that our protocol provides a much higher packet delivery ratio, generates much lower overhead and has much lower end-to-end delay},
   5466         www_section = {mobile Ad-hoc networks, routing, VCG mechanism},
   5467         issn = {1574-1192},
   5468         doi = {10.1016/j.pmcj.2007.02.001},
   5469         url = {http://portal.acm.org/citation.cfm?id=1290327$\#$},
   5470         author = {Wang, Yongwei and Singhal, Mukesh}
   5471 }
   5472 @conference {goldberg-2007,
   5473         title = {Improving the Robustness of Private Information Retrieval},
   5474         booktitle = {Proceedings of the 2007 IEEE Symposium on Security and Privacy},
   5475         year = {2007},
   5476         month = {May},
   5477         publisher = {IEEE Computer Society  Washington, DC, USA},
   5478         organization = {IEEE Computer Society  Washington, DC, USA},
   5479         abstract = {Since 1995, much work has been done creating protocols for private information retrieval (PIR). Many variants of the basic PIR model have been proposed, including such modifications as computational vs. information-theoretic privacy protection, correctness in the face of servers that fail to respond or that respond incorrectly, and protection of sensitive data against the database servers themselves. In this paper, we improve on the robustness of PIR in a number of ways. First, we present a Byzantine-robust PIR protocol which provides information-theoretic privacy protection against coalitions of up to all but one of the responding servers, improving the previous result by a factor of 3. In addition, our protocol allows for more of the responding servers to return incorrect information while still enabling the user to compute the correct result. We then extend our protocol so that queries have information-theoretic protection if a limited number of servers collude, as before, but still retain computational protection if they all collude. We also extend the protocol to provide information-theoretic protection to the contents of the database against collusions of limited numbers of the database servers, at no additional communication cost or increase in the number of servers. All of our protocols retrieve a block of data with communication cost only O($\ell$) times the size of the block, where $\ell$ is the number of servers},
   5480         www_section = {private information retrieval, robustness},
   5481         isbn = {0-7695-2848-1},
   5482         doi = {10.1109/SP.2007.23},
   5483         url = {http://portal.acm.org/citation.cfm?id=1264203},
   5484         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/goldberg-2007.pdf},
   5485         author = {Ian Goldberg}
   5486 }
   5487 @conference {slicing07,
   5488         title = {Information Slicing: Anonymity Using Unreliable Overlays},
   5489         booktitle = {Proceedings of the 4th USENIX Symposium on Network Systems Design and Implementation (NSDI)},
   5490         year = {2007},
   5491         month = {April},
   5492         abstract = {This paper proposes a new approach to anonymous communication called information slicing. Typically, anonymizers use onion routing, where a message is encrypted in layers with the public keys of the nodes along the path. Instead, our approach scrambles the message, divides it into pieces, and sends the pieces along disjoint paths. We show that information slicing addresses message confidentiality as well as source and destination anonymity. Surprisingly, it does not need any public key cryptography. Further, our approach naturally addresses the problem of node failures. These characteristics make it a good fit for use over dynamic peer-to-peer overlays. We evaluate the anonymity of information slicing via analysis and simulations. Our prototype implementation on PlanetLab shows that it achieves higher throughput than onion routing and effectively copes with node churn},
   5493         www_section = {anonymity, onion routing, P2P, privacy},
   5494         url = {http://dspace.mit.edu/handle/1721.1/36344a},
   5495         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/slicing07.pdf},
   5496         author = {Sachin Katti and Jeffery Cohen and Dina Katabi}
   5497 }
   5498 @book {2007_5,
   5499         title = {The Iterated Prisoner's Dilemma: 20 Years On},
   5500         series = {Advances in Natural Computation},
   5501         volume = {4},
   5502         year = {2007},
   5503         pages = {0--262},
   5504         publisher = {World Scientific Publishing Co. Pte. Ltd},
   5505         organization = {World Scientific Publishing Co. Pte. Ltd},
   5506         address = {Singapore},
   5507         abstract = {In 1984, Robert Axelrod published a book, relating the story of two competitions which he ran, where invited academics entered strategies for "The Iterated Prisoners' Dilemma". The book, almost 20 years on, is still widely read and cited by academics and the general public. As a celebration of that landmark work, we have recreated those competitions to celebrate its 20th anniversary, by again inviting academics to submit prisoners' dilemma strategies. The first of these new competitions was run in July 2004, and the second in April 2005. "Iterated Prisoners' Dilemma: 20 Years On" essentially provides an update of Axelrod's book. Specifically, it presents the prisoners' dilemma, its history and variants; highlights Axelrod's original work and its impact; discusses results of the new competitions; and showcases selected papers that reflect the latest research in the area},
   5508         www_section = {dilemma, iterated prisoners, landmark work},
   5509         isbn = {978-981-270-697-3},
   5510         issn = {981-270-697-6},
   5511         author = {Graham Kendall and Xin Yao and Siang Yew Chong}
   5512 }
   5513 @booklet {Iii_keylessjam,
   5514         title = {Keyless Jam Resistance},
   5515         year = {2007},
   5516         abstract = {has been made resistant to jamming by the use of a secret key that is shared by the sender and receiver. There are no known methods for achieving jam resistance without that shared key. Unfortunately, wireless communication is now reaching a scale and a level of importance where such secret-key systems are becoming impractical. For example, the civilian side of the Global Positioning System (GPS) cannot use a shared secret, since that secret would have to be given to all 6.5 billion potential users, and so would no longer be secret. So civilian GPS cannot currently be protected from jamming. But the FAA has stated that the civilian airline industry will transition to using GPS for all navigational aids, even during landings. A terrorist with a simple jamming system could wreak havoc at a major airport. No existing system can solve this problem, and the problem itself has not even been widely discussed. The problem of keyless jam resistance is important. There is a great need for a system that can broadcast messages without any prior secret shared between the sender and receiver. We propose the first system for keyless jam resistance: the BBC algorithm. We describe the encoding, decoding, and broadcast algorithms. We then analyze it for expected resistance to jamming and error rates. We show that BBC can achieve the same level of jam resistance as traditional spread spectrum systems, at just under half the bit rate, and with no shared secret. Furthermore, a hybrid system can achieve the same average bit rate as traditional systems},
   5517         www_section = {GPS},
   5518         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.91.8217},
   5519         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8217.pdf},
   5520         author = {Leemon C. Baird and William L. Bahn and Michael D. Collins and Martin C. Carlisle and Sean C. Butler}
   5521 }
   5522 @conference {DBLP:conf/saint/SaitoMSSM07,
   5523         title = {Local Production, Local Consumption: Peer-to-Peer Architecture for a Dependable and Sustainable Social Infrastructure},
   5524         booktitle = {SAINT'07. Proceedings of the 2007 Symposium on Applications and the Internet},
   5525         year = {2007},
   5526         month = jan,
   5527         pages = {0--58},
   5528         publisher = {IEEE Computer Society},
   5529         organization = {IEEE Computer Society},
   5530         address = {Hiroshima, Japan},
   5531         abstract = {Peer-to-peer (P2P) is a system of overlay networks such that participants can potentially take symmetrical roles.
   5532 This translates itself into a design based on the philosophy of Local Production, Local Consumption (LPLC), originally an agricultural concept to promote sustainable local economy. This philosophy helps enhancing survivability of a society by providing a dependable economic infrastructure and promoting the power of individuals.
   5533 
   5534 This paper attempts to put existing works of P2P designs into the perspective of the five-layer architecture model to realize LPLC, and proposes future research directions toward integration of P2P studies for actualization of a dependable and sustainable social infrastructure},
   5535         www_section = {LPLC, P2P, peer-to-peer networking},
   5536         doi = {http://doi.ieeecomputersociety.org/10.1109/SAINT-W.2007.59},
   5537         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SAINT\%2707\%20-\%20Local\%20production\%2C\%20local\%20consumption\%20p2p\%20architecture.pdf},
   5538         author = {Saito, Kenji and Morino, Eiichi and Yoshihiko Suko and Takaaki Suzuki and Murai, Jun}
   5539 }
   5540 @conference {bauer:wpes2007,
   5541         title = {Low-Resource Routing Attacks Against Tor},
   5542         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2007)},
   5543         year = {2007},
   5544         month = {October},
   5545         publisher = {ACM  New York, NY, USA},
   5546         organization = {ACM  New York, NY, USA},
   5547         address = {Washington, DC, USA},
   5548         abstract = {Tor has become one of the most popular overlay networks for anonymizing TCP traffic. Its popularity is due in part to its perceived strong anonymity properties and its relatively low latency service. Low latency is achieved through Tor's ability to balance the traffic load by optimizing Tor router selection to probabilistically favor routers with high-bandwidth capabilities.
   5549 We investigate how Tor's routing optimizations impact its ability to provide strong anonymity. Through experiments conducted on PlanetLab, we show the extent to which routing performance optimizations have left the system vulnerable to end-to-end traffic analysis attacks from non-global adversaries with minimal resources. Further, we demonstrate that entry guards, added to mitigate path disruption attacks, are themselves vulnerable to attack. Finally, we explore solutions to improve Tor's current routing algorithms and propose alternative routing strategies that prevent some of the routing attacks used in our experiments},
   5550         www_section = {anonymity, load balancing, Tor, traffic analysis},
   5551         isbn = {978-1-59593-883-1},
   5552         doi = {10.1145/1314333.1314336},
   5553         url = {http://portal.acm.org/citation.cfm?id=1314336},
   5554         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bauer-wpes2007.pdf},
   5555         author = {Kevin Bauer and Damon McCoy and Dirk Grunwald and Tadayoshi Kohno and Douglas Sicker}
   5556 }
   5557 @article {2007_6,
   5558         title = {Mapping an Arbitrary Message to an Elliptic Curve when Defined over GF($2^n$)},
   5559         journal = {International Journal of Network Security},
   5560         volume = {8},
   5561         year = {2007},
   5562         month = mar,
   5563         pages = {169--176},
   5564         chapter = {169},
   5565         abstract = {The use of elliptic curve cryptography (ECC) when used
   5566 as a public-key cryptosystem for encryption is such that if
   5567 one has a message to encrypt, then they attempt to map
   5568 it to some point in the prime subgroup of the elliptic curve
   5569 by systematically modifying the message in a deterministic
   5570 manner. The applications typically used for ECC are
   5571 the key-exchange, digital signature or a hybrid encryption
   5572 systems (ECIES) all of which avoid this problem. In this
   5573 paper we provide a deterministic method that guarantees
   5574 that the map of a message to an elliptic curve point can
   5575 be made without any modification. This paper provides
   5576 a solution to the open problem posed in [7] concerning
   5577 the creation of a deterministic method to map arbitrary
   5578 message to an elliptic curve},
   5579         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ijns-2009-v8-n2-p169-176.pdf},
   5580         author = {Brian King}
   5581 }
   5582 @conference {DBLP:conf/infocom/ZhangCY07,
   5583         title = {MARCH: A Distributed Incentive Scheme for Peer-to-Peer Networks},
   5584         booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications},
   5585         year = {2007},
   5586         month = may,
   5587         pages = {1091--1099},
   5588         publisher = {IEEE Computer Society},
   5589         organization = {IEEE Computer Society},
   5590         address = {Anchorage, Alaska, USA},
   5591         abstract = {As peer-to-peer networks grow larger and include more diverse users, the lack of incentive to encourage cooperative behavior becomes one of the key problems. This challenge cannot be fully met by traditional incentive schemes, which suffer from various attacks based on false reports. Especially, due to the lack of central authorities in typical P2P systems, it is difficult to detect colluding groups. Members in the same colluding group can cooperate to manipulate their history information, and the damaging power increases dramatically with the group size. In this paper, we propose a new distributed incentive scheme, in which the benefit that a node can obtain from the system is proportional to its contribution to the system, and a colluding group cannot gain advantage by cooperation regardless of its size. Consequently, the damaging power of colluding groups is strictly limited. The proposed scheme includes three major components: a distributed authority infrastructure, a key sharing protocol, and a contract verification protocol},
   5592         www_section = {march},
   5593         isbn = {1-4244-1047-9},
   5594         doi = {http://dx.doi.org/10.1109/INFCOM.2007.131},
   5595         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20MARCH.pdf},
   5596         author = {Zhan Zhang and Shigang Chen and MyungKeun Yoon}
   5597 }
   5598 @conference {Magharei07meshor,
   5599         title = {Mesh or Multiple-Tree: A Comparative Study of Live P2P Streaming Approaches},
   5600         booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications},
   5601         year = {2007},
   5602         month = may,
   5603         pages = {1424--1432},
   5604         publisher = {IEEE Computer Society},
   5605         organization = {IEEE Computer Society},
   5606         address = {Anchorage, Alaska, USA},
   5607         abstract = {Existing approaches to P2P streaming can be divided into two general classes: (i) tree-based approaches use push-based content delivery over multiple tree-shaped overlays, and (ii) mesh-based approaches use swarming content delivery over a randomly connected mesh. Previous studies have often focused on a particular P2P streaming mechanism and no comparison between these two classes has been conducted. In this paper, we compare and contrast the performance of representative protocols from each class using simulations. We identify the similarities and differences between these two approaches. Furthermore, we separately examine the behavior of content delivery and overlay construction mechanisms for both approaches in static and dynamic scenarios. Our results indicate that the mesh-based approach consistently exhibits a superior performance over the tree-based approach. We also show that the main factors attributing in the inferior performance of the tree-based approach are (i) the static mapping of content to a particular tree, and (ii) the placement of each peer as an internal node in one tree and as a leaf in all other trees},
   5608         www_section = {mesh, multiple tree, overlay, P2P, peer-to-peer networking},
   5609         isbn = {1-4244-1047-9},
   5610         doi = {http://dx.doi.org/10.1109/INFCOM.2007.168},
   5611         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20Mesh\%20or\%20multiple-tree.pdf},
   5612         author = {Magharei, Nazanin and Rejaie, Reza}
   5613 }
   5614 @book {2007_7,
   5615         title = {Multiparty Computation for Interval, Equality, and Comparison Without Bit-Decomposition Protocol},
   5616         booktitle = {Public Key Cryptography -- PKC 2007},
   5617         series = {Lecture Notes in Computer Science},
   5618         volume = {4450},
   5619         year = {2007},
   5620         pages = {343--360},
   5621         publisher = {Springer Berlin Heidelberg},
   5622         organization = {Springer Berlin Heidelberg},
   5623         abstract = {Damg{\r a}rd et al. [11] showed a novel technique to convert a polynomial sharing of secret a into the sharings of the bits of a in constant rounds, which is called the bit-decomposition protocol. The bit-decomposition protocol is a very powerful tool because it enables bit-oriented operations even if shared secrets are given as elements in the field. However, the bit-decomposition protocol is relatively expensive.
   5624 In this paper, we present a simplified bit-decomposition protocol by analyzing the original protocol. Moreover, we construct more efficient protocols for a comparison, interval test and equality test of shared secrets without relying on the bit-decomposition protocol though it seems essential to such bit-oriented operations. The key idea is that we do computation on secret a with c and r where c = a + r, c is a revealed value, and r is a random bitwise-shared secret. The outputs of these protocols are also shared without being revealed.
   5625 The realized protocols as well as the original protocol are constant-round and run with less communication rounds and less data communication than those of [11]. For example, the round complexities are reduced by a factor of approximately 3 to 10},
   5626         www_section = {Bitwise Sharing, Multiparty Computation, secret sharing},
   5627         isbn = {978-3-540-71676-1},
   5628         doi = {10.1007/978-3-540-71677-8_23},
   5629         url = {http://dx.doi.org/10.1007/978-3-540-71677-8_23},
   5630         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2007Nishide.pdf},
   5631         author = {Nishide, Takashi and Ohta, Kazuo},
   5632         editor = {Okamoto, Tatsuaki and Wang, Xiaoyun}
   5633 }
   5634 @article {Banner:2007:MRA:1279660.1279673,
   5635         title = {Multipath routing algorithms for congestion minimization},
   5636         journal = {IEEE/ACM Trans. Netw},
   5637         volume = {15},
   5638         year = {2007},
   5639         month = apr,
   5640         pages = {413--424},
   5641         publisher = {IEEE Press},
   5642         address = {Piscataway, NJ, USA},
   5643         abstract = {Unlike traditional routing schemes that route all traffic along a single path, multipath routing strategies split the traffic among several paths in order to ease congestion. It has been widely recognized that multipath routing can be fundamentally more efficient than the traditional approach of routing along single paths. Yet, in contrast to the single-path routing approach, most studies in the context of multipath routing focused on heuristic methods. We demonstrate the significant advantage of optimal (or near optimal) solutions. Hence, we investigate multipath routing adopting a rigorous (theoretical) approach. We formalize problems that incorporate two major requirements of multipath routing. Then, we establish the intractability of these problems in terms of computational complexity. Finally, we establish efficient solutions with proven performance guarantees},
   5644         www_section = {computer networks, congestion avoidance, routing protocols},
   5645         issn = {1063-6692},
   5646         doi = {http://dx.doi.org/10.1109/TNET.2007.892850},
   5647         url = {http://dx.doi.org/10.1109/TNET.2007.892850},
   5648         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Banner\%20\%26\%20Orda.pdf},
   5649         author = {Banner, Ron and Orda, Ariel}
   5650 }
   5651 @conference {Dimakis:2010:NCD:1861840.1861868,
   5652         title = {Network coding for distributed storage systems},
   5653         booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications},
   5654         volume = {56},
   5655         year = {2007},
   5656         month = may,
   5657         pages = {4539--4551},
   5658         publisher = {IEEE Press},
   5659         organization = {IEEE Press},
   5660         address = {Anchorage, Alaska, USA},
   5661         abstract = {Distributed storage systems provide reliable access to data through redundancy spread over individually unreliable nodes. Application scenarios include data centers, peer-to-peer storage systems, and storage in wireless networks. Storing data using an erasure code, in fragments spread across nodes, requires less redundancy than simple replication for the same level of reliability. However, since fragments must be periodically replaced as nodes fail, a key question is how to generate encoded fragments in a distributed way while transferring as little data as possible across the network. For an erasure coded system, a common practice to repair from a single node failure is for a new node to reconstruct the whole encoded data object to generate just one encoded block. We show that this procedure is sub-optimal. We introduce the notion of regenerating codes, which allow a new node to communicate functions of the stored data from the surviving nodes. We show that regenerating codes can significantly reduce the repair bandwidth. Further, we show that there is a fundamental tradeoff between storage and repair bandwidth which we theoretically characterize using flow arguments on an appropriately constructed graph. By invoking constructive results in network coding, we introduce regenerating codes that can achieve any point in this optimal tradeoff},
   5662         www_section = {distributed storage, network coding, peer-to-peer storage, Regenerating Codes},
   5663         issn = {0018-9448},
   5664         doi = {http://dx.doi.org/10.1109/TIT.2010.2054295},
   5665         url = {http://dx.doi.org/10.1109/TIT.2010.2054295},
   5666         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20Network\%20coding\%20for\%20distributed\%20storage\%20systems.pdf},
   5667         author = {Dimakis, Alexandros G. and Godfrey, Brighten and Wu, Yunnan and Wainwright, Martin J. and Ramchandran, Kannan}
   5668 }
   5669 @conference {2007_8,
   5670         title = {A New Efficient Privacy-preserving Scalar Product Protocol},
   5671         booktitle = {Proceedings of the Sixth Australasian Conference on Data Mining and Analytics--Volume 70},
   5672         year = {2007},
   5673         publisher = {Australian Computer Society, Inc},
   5674         organization = {Australian Computer Society, Inc},
   5675         address = {Darlinghurst, Australia, Australia},
   5676         abstract = {Recently, privacy issues have become important in data analysis, especially when data is horizontally partitioned over several parties. In data mining, the data is typically represented as attribute-vectors and, for many applications, the scalar (dot) product is one of the fundamental operations that is repeatedly used.
   5677 
   5678 In privacy-preserving data mining, data is distributed across several parties. The efficiency of secure scalar products is important, not only because they can cause overhead in communication cost, but dot product operations also serve as one of the basic building blocks for many other secure protocols.
   5679 
   5680 Although several solutions exist in the relevant literature for this problem, the need for more efficient and more practical solutions still remains. In this paper, we present a very efficient and very practical secure scalar product protocol. We compare it to the most common scalar product protocols. We not only show that our protocol is much more efficient than the existing ones, we also provide experimental results by using a real life dataset},
   5681         www_section = {privacy preserving data mining},
   5682         isbn = {978-1-920682-51-4},
   5683         url = {http://dl.acm.org/citation.cfm?id=1378245.1378274},
   5684         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2007Ambirbekyan.pdf},
   5685         author = {Amirbekyan, Artak and Estivill-Castro, Vladimir}
   5686 }
   5687 @conference {2007_9,
   5688         title = {ParaNets: A Parallel Network Architecture for Challenged Networks},
   5689         booktitle = {Proceedings of the Eighth IEEE Workshop on Mobile Computing Systems and Applications (HotMobile 2007)},
   5690         year = {2007},
   5691         month = mar,
   5692         abstract = {Networks characterized by challenges, such as intermittent connectivity, network heterogeneity, and large delays, are called "challenged networks". We propose a novel network architecture for challenged networks dubbed Parallel Networks, or, ParaNets. The vision behind ParaNets is to have challenged network protocols operate over multiple heterogenous networks, simultaneously available, through one or more devices. We present the ParaNets architecture and discuss its short-term challenges and longterm implications. We also argue, based on current research trends and the ParaNets architecture, for the evolution of the conventional protocol stack to a more flexible cross-layered protocol tree. To demonstrate the potential impact of ParaNets, we use Delay Tolerant Mobile Networks (DTMNs) as a representative challenged network over which we evaluate ParaNets. Our ultimate goal in this paper is to open the way for further work in challenged networks using ParaNets as the underlying architecture},
   5693         isbn = {978-0-7695-3001-7},
   5694         url = {http://ieeexplore.ieee.org/Xplore/login.jsp?reload=true\&url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4389542\%2F4389543\%2F04389561.pdf\%3Farnumber\%3D4389561\&authDecision=-203},
   5695         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hotmobile07.pdf},
   5696         author = {Khaled A. Harras and Mike P. Wittie and Kevin C. Almeroth and Elizabeth M. Belding}
   5697 }
   5698 @conference {Petcu:2007:PNP:1625275.1625301,
   5699         title = {PC-DPOP: a new partial centralization algorithm for distributed optimization},
   5700         booktitle = {IJCAI'07--Proceedings of the 20th international joint conference on Artifical intelligence},
   5701         series = {IJCAI'07},
   5702         year = {2007},
   5703         month = jan,
   5704         pages = {167--172},
   5705         publisher = {Morgan Kaufmann Publishers Inc},
   5706         organization = {Morgan Kaufmann Publishers Inc},
   5707         address = {Hyderabad, India},
   5708         abstract = {Fully decentralized algorithms for distributed constraint optimization often require excessive amounts of communication when applied to complex problems. The OptAPO algorithm of [Mailler and Lesser, 2004] uses a strategy of partial centralization to mitigate this problem.
   5709 
   5710 We introduce PC-DPOP, a new partial centralization technique, based on the DPOP algorithm of [Petcu and Faltings, 2005]. PC-DPOP provides better control over what parts of the problem are centralized and allows this centralization to be optimal with respect to the chosen communication structure.
   5711 
   5712         Unlike OptAPO, PC-DPOP allows for a priori, exact predictions about privacy loss, communication, memory and computational requirements on all nodes and links in the network. Upper bounds on communication and memory requirements can be specified.
   5713 
   5714 We also report strong efficiency gains over OptAPO in experiments on three problem domains},
   5715         www_section = {algorithms, distributed constraint optimization, DPOP, OptAPO, partial centralization technique},
   5716         url = {http://dl.acm.org/citation.cfm?id=1625275.1625301},
   5717         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IJCAI\%2707\%20-\%20PC-DPOP.pdf},
   5718         author = {Adrian Petcu and Boi Faltings and Mailler, Roger}
   5719 }
   5720 @conference {fuhrmann07wons,
   5721         title = {Performance of Scalable Source Routing in Hybrid MANETs},
   5722         booktitle = {Proceedings of the Fourth Annual Conference on Wireless On demand Network Systems and Services},
   5723         year = {2007},
   5724         pages = {122--129},
   5725         type = {publication},
   5726         address = {Obergurgl, Austria},
   5727         abstract = {Scalable source routing (SSR) is a novel routing approach for large unstructured networks such as mobile ad hoc networks, mesh networks, or sensor-actuator networks. It is especially suited for organically growing networks of many resource-limited mobile devices supported by a few fixed-wired nodes. SSR is a full-fledged network layer routing protocol that directly provides the semantics of a structured peer-to-peer network. Hence, it can serve as an efficient basis for fully decentralized applications on mobile devices. SSR combines source routing in the physical network with Chord-like routing in the virtual ring formed by the address space. Message forwarding greedily decreases the distance in the virtual ring while preferring physically short paths. Thereby, scalability is achieved without imposing artificial hierarchies or assigning location-dependent addresses},
   5728         www_section = {mobile Ad-hoc networks, P2P, scalable source routing},
   5729         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   5730         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann07wons.pdf},
   5731         author = {Thomas Fuhrmann}
   5732 }
   5733 @book {2007_10,
   5734         title = {Practical and Secure Solutions for Integer Comparison},
   5735         booktitle = {Public Key Cryptography -- PKC 2007},
   5736         series = {Lecture Notes in Computer Science},
   5737         volume = {4450},
   5738         year = {2007},
   5739         pages = {330--342},
   5740         publisher = {Springer Berlin Heidelberg},
   5741         organization = {Springer Berlin Heidelberg},
   5742         abstract = {Yao's classical millionaires' problem is about securely determining whether x > y, given two input values x,y, which are held as private inputs by two parties, respectively. The output x > y becomes known to both parties.
   5743 In this paper, we consider a variant of Yao's problem in which the inputs x,y as well as the output bit x > y are encrypted. Referring to the framework of secure n-party computation based on threshold homomorphic cryptosystems as put forth by Cramer, Damg{\r a}rd, and Nielsen at Eurocrypt 2001, we develop solutions for integer comparison, which take as input two lists of encrypted bits representing x and y, respectively, and produce an encrypted bit indicating whether x > y as output. Secure integer comparison is an important building block for applications such as secure auctions.
   5744 In this paper, our focus is on the two-party case, although most of our results extend to the multi-party case. We propose new logarithmic-round and constant-round protocols for this setting, which achieve simultaneously very low communication and computational complexities. We analyze the protocols in detail and show that our solutions compare favorably to other known solutions},
   5745         www_section = {homomorphic encryption, Millionaires' problem, secure multi-party computation},
   5746         isbn = {978-3-540-71676-1},
   5747         doi = {10.1007/978-3-540-71677-8_22},
   5748         url = {http://dx.doi.org/10.1007/978-3-540-71677-8_22},
   5749         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IntegerComparisonSolution2007Garay.pdf},
   5750         author = {Garay, Juan and Schoenmakers, Berry and Villegas, Jos{\'e}},
   5751         editor = {Okamoto, Tatsuaki and Wang, Xiaoyun}
   5752 }
   5753 @conference {Dwork2007,
   5754         title = {The Price of Privacy and the Limits of LP Decoding},
   5755         booktitle = {Proceedings of the 39th Annual ACM Symposium on Theory of Computing (STOC'07)},
   5756         year = {2007},
   5757         pages = {85--94},
   5758         author = {Cynthia Dwork and Frank D. McSherry and Kunal Talwar}
   5759 }
   5760 @conference {Magharei:2009:PPR:1618562.1618566,
   5761         title = {PRIME: Peer-to-Peer Receiver-drIven MEsh-based Streaming},
   5762         booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications},
   5763         volume = {17},
   5764         year = {2007},
   5765         month = may,
   5766         pages = {1052--1065},
   5767         publisher = {IEEE Press},
   5768         organization = {IEEE Press},
   5769         address = {Anchorage, Alaska, USA},
   5770         abstract = {The success of file swarming mechanisms such as BitTorrent has motivated a new approach for scalable streaming of live content that we call mesh-based Peer-to-Peer (P2P) streaming. In this approach, participating end-systems (or peers) form a randomly connected mesh and incorporate swarming content delivery to stream live content. Despite the growing popularity of this approach, neither the fundamental design tradeoffs nor the basic performance bottlenecks in mesh-based P2P streaming are well understood.
   5771 
   5772 In this paper, we follow a performance-driven approach to design PRIME, a scalable mesh-based P2P streaming mechanism for live content. The main design goal of PRIME is to minimize two performance bottlenecks, namely bandwidth bottleneck and content bottleneck. We show that the global pattern of delivery for each segment of live content should consist of a diffusion phase which is followed by a swarming phase. This leads to effective utilization of available resources to accommodate scalability and also minimizes content bottleneck. Using packet level simulations, we carefully examine the impact of overlay connectivity, packet scheduling scheme at individual peers and source behavior on the overall performance of the system. Our results reveal fundamental design tradeoffs of mesh-based P2P streaming for live content},
   5773         www_section = {communication network, computer networks, Internet, multimedia communication, multimedia systems},
   5774         issn = {1063-6692},
   5775         doi = {http://dx.doi.org/10.1109/TNET.2008.2007434},
   5776         url = {http://dx.doi.org/10.1109/TNET.2008.2007434},
   5777         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20PRIME.pdf},
   5778         author = {Magharei, Nazanin and Rejaie, Reza}
   5779 }
   5780 @article {1273222,
   5781         title = {Privacy protection in personalized search},
   5782         journal = {SIGIR Forum},
   5783         volume = {41},
   5784         number = {1},
   5785         year = {2007},
   5786         pages = {4--17},
   5787         publisher = {ACM},
   5788         address = {New York, NY, USA},
   5789         abstract = {Personalized search is a promising way to improve the accuracy of web search, and has been attracting much attention recently. However, effective personalized search requires collecting and aggregating user information, which often raise serious concerns of privacy infringement for many users. Indeed, these concerns have become one of the main barriers for deploying personalized search applications, and how to do privacy-preserving personalization is a great challenge. In this paper, we systematically examine the issue of privacy preservation in personalized search. We distinguish and define four levels of privacy protection, and analyze various software architectures for personalized search. We show that client-side personalization has advantages over the existing server-side personalized search services in preserving privacy, and envision possible future strategies to fully protect user privacy},
   5790         www_section = {privacy, search},
   5791         issn = {0163-5840},
   5792         doi = {http://doi.acm.org/10.1145/1273221.1273222},
   5793         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2007j_sigirforum_shen.pdf},
   5794         author = {Shen, Xuehua and Tan, Bin and Zhai, ChengXiang}
   5795 }
   5796 @booklet {Bellovin2007,
   5797         title = {Privacy-enhanced searches using encrypted Bloom filters},
   5798         year = {2007},
   5799         pages = {1--16},
   5800         publisher = {Columbia University CUCS-034-07},
   5801         author = {Bellovin, Steven M. and Cheswick, William R.}
   5802 }
   5803 @article {1327188,
   5804         title = {Private Searching on Streaming Data},
   5805         journal = {J. Cryptol},
   5806         volume = {20},
   5807         number = {4},
   5808         year = {2007},
   5809         pages = {397--430},
   5810         publisher = {Springer-Verlag New York, Inc},
   5811         address = {Secaucus, NJ, USA},
   5812         abstract = {In this paper we consider the problem of private searching on streaming data, where we can efficiently implement searching for documents that satisfy a secret criteria (such as the presence or absence of a hidden combination of hidden keywords) under various cryptographic assumptions. Our results can be viewed in a variety of ways: as a generalization of the notion of private information retrieval (to more general queries and to a streaming environment); as positive results on privacy-preserving datamining; and as a delegation of hidden program computation to other machines},
   5813         www_section = {keywords, privacy, private information retrieval, search, streaming},
   5814         issn = {0933-2790},
   5815         doi = {http://dx.doi.org/10.1007/s00145-007-0565-3},
   5816         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ostrovsky-Skeith.pdf},
   5817         author = {Rafail Ostrovsky and William E. Skeith}
   5818 }
   5819 @phdthesis {kostas-thesis,
   5820         title = {Probabilistic and Information-Theoretic Approaches to Anonymity},
   5821         year = {2007},
   5822         month = {October},
   5823         school = {Laboratoire d'Informatique (LIX), {\'E}cole Polytechnique, Paris},
   5824         type = {phd},
   5825         abstract = {As the number of Internet activities increases, there is a growing amount of personal information about the users that is transferred using public electronic means, making it feasible to collect a huge amount of information about a person. As a consequence, the need for mechanisms to protect such information is compelling. In this thesis, we study security protocols with an emphasis on the property of anonymity and we propose methods to express and verify this property.
   5826 
   5827 Anonymity protocols often use randomization to introduce noise, thus limiting the inference power of a malicious observer. We consider a probabilistic framework in which a protocol is described by its set of anonymous information, observable information and the conditional probability of observing the latter given the former. In this framework we express two anonymity properties, namely strong anonymity and probable innocence.
   5828 
   5829 Then we aim at quantitative definitions of anonymity. We view protocols as noisy channels in the information-theoretic sense and we express their degree of anonymity as the converse of channel capacity. We apply this definition to two known anonymity protocols. We develop a monotonicity principle for the capacity, and use it to show a number of results for binary channels in the context of algebraic information theory. We then study the probability of error for the attacker in the context of Bayesian inference, showing that it is a piecewise linear function and using this fact to improve known bounds from the literature.
   5830 
   5831 Finally we study a problem that arises when we combine probabilities with nondeterminism, where the scheduler is too powerful even for trivially secure protocols. We propose a process calculus which allows to express restrictions to the scheduler, and we use it in the analysis of an anonymity and a contract-signing protocol},
   5832         url = {http://www.win.tue.nl/~kostas/these/},
   5833         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kostas-thesis.pdf},
   5834         author = {Konstantinos Chatzikokolakis}
   5835 }
   5836 @conference {ChatziPP07,
   5837         title = {Probability of Error in Information-Hiding Protocols},
   5838         booktitle = {Proceedings of the 20th IEEE Computer Security Foundations Symposium (CSF20)},
   5839         year = {2007},
   5840         abstract = {Randomized protocols for hiding private information can fruitfully be regarded as noisy channels in the information-theoretic sense, and the inference of the concealed information can be regarded as a hypothesis-testing problem. We consider the Bayesian approach to the problem, and investigate the probability of error associated to the inference when the MAP (Maximum Aposteriori Probability) decision rule is adopted. Our main result is a constructive characterization of a convex base of the probability of error, which allows us to compute its maximum value (over all possible input distributions), and to identify upper bounds for it in terms of simple functions. As a side result, we are able to improve substantially the Hellman-Raviv and the Santhi-Vardy bounds expressed in terms of conditional entropy. We then discuss an application of our methodology to the Crowds protocol, and in particular we show how to compute the bounds on the probability that an adversary breaks anonymity},
   5841         www_section = {anonymity, privacy},
   5842         isbn = {0-7695-2819-8},
   5843         doi = {10.1109/CSF.2007.27},
   5844         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.79.2620},
   5845         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChatziPP07.pdf},
   5846         author = {Konstantinos Chatzikokolakis and Catuscia Palamidessi and Prakash Panangaden}
   5847 }
   5848 @proceedings {DBLP:conf/stoc/2007,
   5849         booktitle = {Proceedings of the 39th Annual ACM Symposium on Theory of Computing (STOC'07)},
   5850         title = {Proceedings of the 39th Annual ACM Symposium on Theory of Computing (STOC'07)},
   5851         year = {2007},
   5852         month = {June},
   5853         publisher = {ACM},
   5854         address = {San Diego, California, USA},
   5855         isbn = {978-1-59593-631-8},
   5856         editor = {David S. Johnson and Uriel Feige}
   5857 }
   5858 @mastersthesis {kising07proxselectigor,
   5859         title = {Proximity Neighbor Selection and Proximity Route Selection for the Overlay-Network IGOR},
   5860         volume = {Computer Science},
   5861         year = {2007},
   5862         month = jun,
   5863         pages = {0--79},
   5864         school = {Technische Universit{\"a}t M{\"u}nchen},
   5865         type = {Diplomarbeit},
   5866         address = {Munich, Germany},
   5867         abstract = {Unfortunately, of all known "Distributed Hash Table"-based overlay networks, only a few take proximity in terms of latency into account. Query routing can therefore come with high latency when very distant hops are used; one can imagine hops going from one continent to the other and back, even though the target node may be located close to the requesting node. Such cases increase query latency to a great extent and are responsible for performance bottlenecks in query routing.
   5868 There exist two main strategies to reduce latency in the query routing process: Proximity Neighbor Selection and Proximity Route Selection. As a new proposal of PNS for the IGOR overlay network, Merivaldi is developed. Merivaldi represents a combination of two basic ideas: The first idea is the Meridian framework and its Closest-Node- Discovery without synthetic coordinates. The second idea is Vivaldi, a distributed algorithm for predicting Internet latency between arbitrary Internet hosts. Merivaldi is quite similar to Meridian. It differs in using no direct Round Trip Time measurements like Meridian does to obtain latency characteristics between hosts. Merivaldi obtains latency characteristics of nodes using the latency prediction derived from the Vivaldi-coordinates. A Merivaldi-node forms exponentially growing latency-rings, i.e., the rings correspond to latency distances to the Merivaldi-node itself. In these rings node-references are inserted with regard to their latency characteristics. These node-references are obtained through a special protocol. A Merivaldi-node finds latency-closest nodes through periodic querying its ring-members for closer nodes. If a closer node is found by a ring-member the query is forwarded to this one until no closer one can be found. The closest on this way reports itself to the Merivaldi-node.
   5869 Exemplary analyses show that Merivaldi imposes only a modest burden on the network. Merivaldi uses at most O(log N) CND-hops to recognize a closest node, where N is the number of nodes. Empirical tests confirm this analysis. The analysis also shows that the overhead for a Merivaldi-node is modest. It is shown that Merivaldi's Vivaldi works with high quality with the PING message type used},
   5870         www_section = {IGOR, neighbor selection, overlay-network, proximity route selection},
   5871         url = {http://i30www.ira.uka.de/teaching/theses/pasttheses/},
   5872         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kising\%20-\%20Proximity\%20Neighbor\%20Selection\%20for\%20IGOR.pdf},
   5873         author = {Yves Philippe Kising}
   5874 }
   5875 @conference {1361410,
   5876         title = {Purely functional system configuration management},
   5877         booktitle = {HOTOS'07: Proceedings of the 11th USENIX workshop on Hot topics in operating systems},
   5878         year = {2007},
   5879         pages = {1--6},
   5880         publisher = {USENIX Association},
   5881         organization = {USENIX Association},
   5882         address = {Berkeley, CA, USA},
   5883         abstract = {System configuration management is difficult because systems evolve in an undisciplined way: packages are upgraded, configuration files are edited, and so on. The management of existing operating systems is strongly imperative in nature, since software packages and configuration data (e.g., /bin and /etc in Unix) can be seen as imperative data structures: they are updated in-place by system administration actions. In this paper we present an alternative approach to system configuration management: a purely functional method, analogous to languages like Haskell. In this approach, the static parts of a configuration -- software packages, configuration files, control scripts -- are built from pure functions, i.e., the results depend solely on the specified inputs of the function and are immutable. As a result, realising a system configuration becomes deterministic and reproducible. Upgrading to a new configuration is mostly atomic and doesn't overwrite anything of the old configuration, thus enabling rollbacks. We have implemented the purely functional model in a small but realistic Linux-based operating system distribution called NixOS},
   5884         url = {http://portal.acm.org/citation.cfm?id=1361410$\#$},
   5885         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dolstra.pdf},
   5886         author = {Dolstra, Eelco and Hemel, Armijn}
   5887 }
   5888 @conference {pitchblack,
   5889         title = {Routing in the Dark: Pitch Black},
   5890         booktitle = {23rd Annual Computer Security Applications Conference (ACSAC 2007)},
   5891         year = {2007},
   5892         pages = {305--314},
   5893         publisher = {IEEE Computer Society},
   5894         organization = {IEEE Computer Society},
   5895         abstract = {In many networks, such as mobile ad-hoc networks and friend-to-friend overlay networks, direct communication between nodes is limited to specific neighbors.  Often these networks have a small-world topology; while short paths exist between any pair of nodes in small-world networks, it is non-trivial to determine such paths with a distributed algorithm.  Recently, Clarke and Sandberg
   5896 proposed the first decentralized routing algorithm that achieves efficient routing in such small-world networks.
   5897 
   5898 This paper is the first independent security analysis of Clarke and Sandberg's routing algorithm. We show that a relatively weak participating adversary can render the overlay ineffective without being detected, resulting in significant data loss due to the resulting load imbalance.  We have measured the impact of the attack
   5899 in a testbed of 800 nodes using minor modifications to Clarke and Sandberg's implementation of their routing algorithm in Freenet. Our experiments show that the attack is highly effective, allowing a small number of malicious nodes to cause rapid loss of data on the entire network.
   5900 
   5901 We also discuss various proposed countermeasures designed to detect, thwart or limit the attack. While we were unable to find effective countermeasures, we hope that the presented analysis will be a first step towards the design of secure distributed routing algorithms for restricted-route topologies},
   5902         www_section = {denial-of-service, Freenet, installation, routing},
   5903         url = {http://grothoff.org/christian/pitchblack.pdf},
   5904         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pitchblack.pdf},
   5905         author = {Nathan S Evans and Chris GauthierDickey and Christian Grothoff}
   5906 }
   5907 @conference {murdoch-pet2007,
   5908         title = {Sampled Traffic Analysis by Internet-Exchange-Level Adversaries},
   5909         booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET 2007)},
   5910         year = {2007},
   5911         month = {June},
   5912         publisher = {Springer},
   5913         organization = {Springer},
   5914         address = {Ottawa, Canada},
   5915         abstract = {Existing low-latency anonymity networks are vulnerable to traffic analysis, so location diversity of nodes is essential to defend against attacks. Previous work has shown that simply ensuring geographical diversity of nodes does not resist, and in some cases exacerbates, the risk of traffic analysis by ISPs. Ensuring high autonomous-system (AS) diversity can resist this weakness. However, ISPs commonly connect to many other ISPs in a single location, known as an Internet eXchange (IX). This paper shows that IXes are a single point where traffic analysis can be performed. We examine to what extent this is true, through a case study of Tor nodes in the UK. Also, some IXes sample packets flowing through them for performance analysis reasons, and this data could be exploited to de-anonymize traffic. We then develop and evaluate Bayesian traffic analysis techniques capable of processing this sampled data},
   5916         www_section = {anonymity, Internet exchange, traffic analysis},
   5917         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/murdoch-pet2007.pdf},
   5918         author = {Steven J. Murdoch and Piotr Zieli{\'n}ski},
   5919         editor = {Borisov, Nikita and Philippe Golle}
   5920 }
   5921 @mastersthesis {2007_11,
   5922         title = {Secure asynchronous change notifications for a distributed file system},
   5923         volume = {Computer Science},
   5924         year = {2007},
   5925         month = nov,
   5926         pages = {0--74},
   5927         school = {Technische Universit{\"a}t M{\"u}nchen},
   5928         address = {Munich, Germany},
   5929         abstract = {Distributed file systems have been a topic of interest for a long time and there are many file systems that are distributed in one way or another. However, most distributed file systems are only reasonably usable within a local network of computers and some main tasks are still delegated to a very small number of servers.
   5930 Today, with the advent of Peer-to-Peer technology, distributed file systems that work on top of Peer-to-Peer systems can be built. These systems can be built with no or far fewer centralised components and are usable on a global scale.
   5931 The System Architecture Group at the University of Karlsruhe in Germany has developed such a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information.
   5932 One problem with this approach is that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way.
   5933 Usually the strategy to solve this type of problem is an encrypted multicast. This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al},
   5934         www_section = {distributed file system, distributed hash table, peer-to-peer networking, store information},
   5935         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf},
   5936         author = {Bernhard Amann}
   5937 }
   5938 @conference {saballus07secure,
   5939         title = {Secure Group Communication in Ad-Hoc Networks using Tree Parity Machines},
   5940         booktitle = {KiVS 2007},
   5941         year = {2007},
   5942         month = feb,
   5943         pages = {457--468},
   5944         publisher = {VDE Verlag},
   5945         organization = {VDE Verlag},
   5946         address = {Bern, Switzerland},
   5947         abstract = {A fundamental building block of secure group communication is the establishment of a common group key. This can be divided into key agreement and key distribution. Common group key agreement protocols are based on the Diffie-Hellman (DH) key exchange and extend it to groups. Group key distribution protocols are centralized approaches which make use of one or more special key servers. In contrast to these approaches, we present a protocol which makes use of the Tree Parity Machine key exchange between multiple parties. It does not need a centralized server and therefore is especially suitable for ad-hoc networks of any kind},
   5948         www_section = {ad-hoc networks},
   5949         isbn = {978-3-8007-2980-7},
   5950         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.9413},
   5951         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saballus07secure.pdf},
   5952         author = {Bjoern Saballus and Sebastian Wallner and Markus Volkmer}
   5953 }
   5954 @article {Kaafar:2007:SIC:1282427.1282388,
   5955         title = {Securing Internet Coordinate Embedding Systems},
   5956         journal = {SIGCOMM Computer Communication Review},
   5957         volume = {37},
   5958         year = {2007},
   5959         month = aug,
   5960         pages = {61--72},
   5961         publisher = {ACM},
   5962         address = {New York, NY, USA},
   5963         abstract = {This paper addresses the issue of the security of Internet Coordinate Systems, by proposing a general method for malicious behavior detection during coordinate computations. We first show that the dynamics of a node, in a coordinate system without abnormal or malicious behavior, can be modeled by a Linear State Space model and tracked by a Kalman filter. Then we show that the obtained model can be generalized in the sense that the parameters of a filter calibrated at a node can be used effectively to model and predict the dynamic behavior at another node, as long as the two nodes are not too far apart in the network. This leads to the proposal of a Surveyor infrastructure: Surveyor nodes are trusted, honest nodes that use each other exclusively to position themselves in the coordinate space, and are therefore immune to malicious behavior in the system. During their own coordinate embedding, other nodes can then use the filter parameters of a nearby Surveyor as a representation of normal, clean system behavior to detect and filter out abnormal or malicious activity. A combination of simulations and PlanetLab experiments is used to demonstrate the validity, generality, and effectiveness of the proposed approach for two representative coordinate embedding systems, namely Vivaldi and NPS},
   5964         www_section = {internet coordinates-embedding systems, kalman filter, malicious behavior detection, network positioning systems, security},
   5965         issn = {0146-4833},
   5966         doi = {http://doi.acm.org/10.1145/1282427.1282388},
   5967         url = {http://doi.acm.org/10.1145/1282427.1282388},
   5968         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Securing\%20Internet\%20Coordinate\%20Embedding\%20Systems.pdf},
   5969         author = {Kaafar, Mohamed Ali and Laurent Mathy and Barakat, Chadi and Salamatian, Kave and Turletti, Thierry and Dabbous, Walid}
   5970 }
   5971 @conference {Conner:2007:SPM:1377934.1377937,
   5972         title = {Securing peer-to-peer media streaming systems from selfish and malicious behavior},
   5973         booktitle = {MDS'07. Proceedings of the 4th on Middleware Doctoral Symposium},
   5974         series = {MDS '07},
   5975         volume = {13},
   5976         year = {2007},
   5977         month = nov,
   5978         pages = {1--6},
   5979         publisher = {ACM},
   5980         organization = {ACM},
   5981         address = {Newport Beach, CA, USA},
   5982         abstract = {We present a flexible framework for throttling attackers in peer-to-peer media streaming systems. In such systems, selfish nodes (e.g., free riders) and malicious nodes (e.g., DoS attackers) can overwhelm the system by issuing too many requests in a short interval of time. Since peer-to-peer systems are decentralized, it is difficult for individual peers to limit the aggregate download bandwidth consumed by other remote peers. This could potentially allow selfish and malicious peers to exhaust the system's available upload bandwidth. In this paper, we propose a framework to provide a solution to this problem by utilizing a subset of trusted peers (called kantoku nodes) that collectively monitor the bandwidth usage of untrusted peers in the system and throttle attackers. This framework has been evaluated through simulation thus far. Experiments with a full implementation on a network testbed are part of our future work},
   5983         www_section = {accounting, multimedia, peer-to-peer networking, security},
   5984         isbn = {978-1-59593-933-3},
   5985         doi = {http://doi.acm.org/10.1145/1377934.1377937},
   5986         url = {http://doi.acm.org/10.1145/1377934.1377937},
   5987         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MDS\%2707\%20-\%20Conner\%20\%26\%20Nahrstedt\%20-\%20Securing\%20peer-to-peer\%20media\%20streaming\%20systems.pdf},
   5988         author = {Conner, William and Nahrstedt, Klara}
   5989 }
   5990 @conference {DBLP:conf/ladc/CourtesKP07,
   5991         title = {Security Rationale for a Cooperative Backup Service for Mobile Devices},
   5992         booktitle = {LADC},
   5993         year = {2007},
   5994         pages = {212--230},
   5995         abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. This paper discusses security considerations that arise in the design of a cooperative backup service for mobile devices. Participating devices leverage encounters with other devices to temporarily replicate critical data. Anyone is free to participate in the cooperative service, without requiring any prior trust relationship with other participants. In this paper, we identify security threats relevant in this context as well as possible solutions and discuss how they map to low-level security requirements related to identity and trust establishment. We propose self-organized, policy-neutral mechanisms that allow the secure designation and identification of participating devices. We show that they can serve as a building block for a wide range of cooperation policies that address most of the security threats we are concerned with. We conclude on future directions},
   5996         www_section = {backup, reputation, self-organization},
   5997         doi = {10.1007/978-3-540-75294-3},
   5998         url = {http://www.springerlink.com/content/p210q274g22j8g77/},
   5999         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.5673.pdf},
   6000         author = {Ludovic Court{\`e}s and Killijian, Marc-Olivier and Powell, David}
   6001 }
   6002 @conference {1396915,
   6003         title = {S/Kademlia: A practicable approach towards secure key-based routing},
   6004         booktitle = {ICPADS '07: Proceedings of the 13th International Conference on Parallel and Distributed Systems},
   6005         year = {2007},
   6006         pages = {1--8},
   6007         publisher = {IEEE Computer Society},
   6008         organization = {IEEE Computer Society},
   6009         address = {Washington, DC, USA},
   6010         abstract = {Security is a common problem in completely decentralized peer-to-peer systems. Although several suggestions exist on how to create a secure key-based routing protocol, a practicable approach is still unattended. In this paper we introduce a secure key-based routing protocol based on Kademlia that has a high resilience against common attacks by using parallel lookups over multiple disjoint paths, limiting free nodeId generation with crypto puzzles and introducing a reliable sibling broadcast. The latter is needed to store data in a safe replicated way. We evaluate the security of our proposed extensions to the Kademlia protocol analytically and simulate the effects of multiple disjoint paths on lookup success under the influence of adversarial nodes},
   6011         isbn = {978-1-4244-1889-3},
   6012         doi = {10.1109/ICPADS.2007.4447808},
   6013         url = {http://portal.acm.org/citation.cfm?id=1396915$\#$},
   6014         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SKademlia2007.pdf},
   6015         author = {Baumgart, Ingmar and Sebastian Mies}
   6016 }
   6017 @conference {1326260,
   6018         title = {Skype4Games},
   6019         booktitle = {NetGames '07: Proceedings of the 6th ACM SIGCOMM workshop on Network and system support for games},
   6020         year = {2007},
   6021         pages = {13--18},
   6022         publisher = {ACM},
   6023         organization = {ACM},
   6024         address = {New York, NY, USA},
   6025         abstract = {We propose to take advantage of the distributed multi-user Skype system for the implementation of an interactive online game. Skype combines efficient multi-peer support with the ability to get around firewalls and network address translation; in addition, speech is available to all game participants for free. We discuss the network requirements of interactive multi-player games, in particular concerning end-to-end delay and distributed state maintenance. We then introduce the multi-user support available in Skype and conclude that it should suffice for a game implementation. We explain how our multi-player game based on the Irrlicht graphics engine was implemented over Skype, and we present very promising results of an early performance evaluation},
   6026         www_section = {distributed interactive applications, P2P},
   6027         isbn = {978-0-9804460-0-5},
   6028         doi = {10.1145/1326257.1326260},
   6029         url = {http://portal.acm.org/citation.cfm?id=1326260$\#$},
   6030         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Triebel2007a.pdf},
   6031         author = {Triebel, Tonio and Guthier, Benjamin and Effelsberg, Wolfgang}
   6032 }
   6033 @conference {space-efficient,
   6034         title = {Space-Efficient Private Search},
   6035         booktitle = {Proceedings of Financial Cryptography (FC2007)},
   6036         series = {Lecture Notes in Computer Science},
   6037         year = {2007},
   6038         publisher = {Springer-Verlag},
   6039         organization = {Springer-Verlag},
   6040         address = {Tobago},
   6041         abstract = {Private keyword search is a technique that allows for searching and retrieving documents matching certain keywords without revealing the search criteria. We improve the space efficiency of the Ostrovsky et al. Private Search [9] scheme by describing methods that require considerably shorter buffers for returning the results of the search. Our basic decoding scheme, recursive extraction, requires buffers of length less than twice the number of returned results and is still simple and highly efficient. Our extended decoding schemes rely on solving systems of simultaneous equations, and in special cases can uncover documents in buffers that are close to 95\% full. Finally, we note the similarity between our decoding techniques and the ones used to decode rateless codes, and show how such codes can be extracted from encrypted documents},
   6042         www_section = {keywords, privacy},
   6043         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.130.7014},
   6044         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/privsearch-aeolus.pdf},
   6045         author = {George Danezis and Claudia Diaz}
   6046 }
   6047 @conference {2007_12,
   6048         title = {SpoVNet: An Architecture for Supporting Future Internet Applications},
   6049         booktitle = {Proc. 7th W{\"u}rzburg Workshop on IP: Joint EuroFGI and ITG Workshop on Visions of Future Generation Networks},
   6050         year = {2007},
   6051         address = {W{\"u}rzburg, Germany},
   6052         abstract = {This talk presents an approach for providing Spontaneous Virtual Networks (SpoVNets) that enable flexible, adaptive, and spontaneous provisioning of application-oriented and network-oriented services on top of heterogeneous networks. SpoVNets supply new and uniform communication abstractions for future Internet applications so applications can make use of advanced services not supported by today's Internet. We expect that many functions, which are currently provided by SpoVNet on the application layer, will become an integral part of future networks. Thus, SpoVNet will transparently use advanced services from the underlying network infrastructure as they become available (e.g., QoS-support in access networks or multicast in certain ISPs), enabling a seamless transition from current to future generation networks without modifying the applications},
   6053         url = {http://www.tm.uka.de/itm/publications.php?bib=257},
   6054         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SpoVNet.pdf , https://git.gnunet.org/bibliography.git/plain/docs/Mies\%20-\%20SpoVNet.pdf},
   6055         author = {Sebastian Mies}
   6056 }
   6057 @conference {ringstwice07,
   6058         title = {Subliminal Channels in the Private Information Retrieval Protocols},
   6059         booktitle = {Proceedings of the 28th Symposium on Information Theory in the Benelux},
   6060         year = {2007},
   6061         publisher = {Werkgemeenschap voor Informatie- en Communicatietheorie},
   6062         organization = {Werkgemeenschap voor Informatie- en Communicatietheorie},
   6063         address = {Enschede, NL},
   6064         abstract = {Information-theoretic private information retrieval (PIR) protocols, such as those described by Chor et al. [5], provide a mechanism by which users can retrieve information from a database distributed across multiple servers in such a way that neither the servers nor an outside observer can determine the contents of the data being retrieved. More recent PIR protocols also provide protection against Byzantine servers, such that a user can detect when one or more servers have attempted to tamper with the data he has requested. In some cases (as in the protocols presented by Beimel and Stahl [1]), the user can still recover his data and protect the contents of his query if the number of Byzantine servers is below a certain threshold; this property is referred to as Byzantine-recovery. However, tampering with a user's data is not the only goal a Byzantine server might have. We present a scenario in which an arbitrarily sized coalition of Byzantine servers transforms the userbase of a PIR network into a signaling framework with varying levels of detectability by means of a subliminal channel [11]. We describe several such subliminal channel techniques, illustrate several use-cases for this subliminal channel, and demonstrate its applicability to a wide variety of PIR protocols},
   6065         www_section = {private information retrieval},
   6066         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.80.9190},
   6067         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ringstwice07.pdf},
   6068         author = {Meredith L. Patterson and Len Sassaman}
   6069 }
   6070 @conference {Li2007-tcloseness,
   6071         title = {t-Closeness: Privacy Beyond k-Anonymity and $\ell$-Diversity},
   6072         booktitle = {Proceedings of the 23rd International Conference on Data Engineering (ICDE 2007)},
   6073         year = {2007},
   6074         pages = {106--115},
   6075         author = {Ninghui Li and Tiancheng Li and Suresh Venkatasubramanian}
   6076 }
   6077 @conference {saballus07distributed,
   6078         title = {Towards a Distributed Java VM in Sensor Networks using Scalable Source Routing},
   6079         booktitle = {6. Fachgespraech Sensornetzwerke der GI/ITG Fachgruppe ''Kommunikation und Verteilte Systeme''},
   6080         year = {2007},
   6081         pages = {47--50},
   6082         address = {Aachen, Germany},
   6083         abstract = {One of the major drawbacks of small embedded systems such as sensor nodes is the need to program in a low level programming language like C or assembler. The resulting code is often unportable, system specific and demands deep knowledge of the hardware details. This paper motivates the use of Java as an alternative programming language. We focus on the tiny AmbiComp Virtual Machine (ACVM) which we currently develop as the main part of a more general Java based development platform for interconnected sensor nodes. This VM is designed to run on different small embedded devices in a distributed network. It uses the novel scalable source routing (SSR) algorithm to distribute and share data and workload. SSR provides key based routing which enables distributed hash table (DHT) structures as a substrate for the VM to disseminate and access remote code and objects. This approach allows all VMs in the network to collaborate. The result looks like one large, distributed VM which supports a subset of the Java language. The ACVM substitutes functionality of an operating system which is missing on the target platform. As this development is work in progress, we outline the ideas behind this approach to provide first insights into the upcoming problems},
   6084         www_section = {distributed hash table, scalable source routing},
   6085         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.7724},
   6086         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saballus07distributed.pdf},
   6087         author = {Bjoern Saballus and Johannes Eickhold and Thomas Fuhrmann}
   6088 }
   6089 @conference {Sherr07towardsapplication-aware,
   6090         title = {Towards application-aware anonymous routing},
   6091         booktitle = {Second USENIX Workshop on Hot Topics in Security (HotSec)},
   6092         year = {2007},
   6093         publisher = {USENIX Association  Berkeley, CA, USA},
   6094         organization = {USENIX Association  Berkeley, CA, USA},
   6095         abstract = {This paper investigates the problem of designing anonymity networks that meet application-specific performance and security constraints. We argue that existing anonymity networks take a narrow view of performance by considering only the strength of the offered anonymity. However, real-world applications impose a myriad of communication requirements, including end-to-end bandwidth and latency, trustworthiness of intermediary routers, and network jitter.
   6096 
   6097 We pose a grand challenge for anonymity: the development of a network architecture that enables applications to customize routes that trade off between anonymity and performance. Towards this challenge, we present the Application-Aware Anonymity (A3) routing service. We envision that A3 will serve as a powerful and flexible anonymous communications layer that will spur the future development of anonymity services},
   6098         www_section = {anonymity, routing},
   6099         url = {http://portal.acm.org/citation.cfm?id=1361423},
   6100         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/a3.pdf},
   6101         author = {Micah Sherr and Boon Thau Loo and Matt Blaze}
   6102 }
   6103 @conference {1270971,
   6104         title = {Towards Fair Event Dissemination},
   6105         booktitle = {ICDCSW '07: Proceedings of the 27th International Conference on Distributed Computing Systems Workshops},
   6106         year = {2007},
   6107         pages = {0--63},
   6108         publisher = {IEEE Computer Society},
   6109         organization = {IEEE Computer Society},
   6110         address = {Washington, DC, USA},
   6111         abstract = {Event dissemination in large scale dynamic systems is typically claimed to be best achieved using decentralized peer-to-peer architectures. The rationale is to have every participant in the system act both as a client (information consumer) and as a server (information dissemination enabler), thus, precluding specific brokers which would prevent scalability and fault-tolerance. We argue that, for such decentralized architectures to be really meaningful, participants should serve the system as much as they benefit from it. That is, the system should be fair in the sense that the extent to which a participant acts as a server should depend on the extent to which it has the opportunity to act as a client. This is particularly crucial in selective information dissemination schemes where clients are not all interested in the same information. In this position paper, we discuss what a notion of fairness could look like, explain why current architectures are not fair, and raise several challenges towards achieving fairness},
   6112         isbn = {0-7695-2838-4},
   6113         doi = {10.1109/ICDCSW.2007.83},
   6114         url = {http://portal.acm.org/citation.cfm?id=1270388.1270971$\#$},
   6115         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.9758.pdf},
   6116         author = {Baehni, Sebastien and Rachid Guerraoui and Boris Koldehofe and Monod, Maxime}
   6117 }
   6118 @conference {troncoso-ih2007,
   6119         title = {Traffic Analysis Attacks on a Continuously-Observable Steganographic File System},
   6120         booktitle = {Proceedings of Information Hiding Workshop (IH 2007)},
   6121         series = {Lecture Notes in Computer Science},
   6122         volume = {4567},
   6123         year = {2007},
   6124         month = {June},
   6125         pages = {220--236},
   6126         publisher = {Springer-Verlag},
   6127         organization = {Springer-Verlag},
   6128         address = {Saint-Malo, FR},
   6129         abstract = {A continuously-observable steganographic file system allows users to remotely store files on a raw storage device; the security goal is to offer plausible deniability even when the raw storage device is continuously monitored by an attacker. Zhou, Pang and Tan have proposed such a system in [7] with a claim of provable security against traffic analysis. In this paper, we disprove their claims by presenting traffic analysis attacks on the file update algorithm of Zhou et al. Our attacks are highly effective in detecting file updates and revealing the existence and location of files. For multi-block files, we show that two updates are sufficient to discover the file. One-block files accessed a sufficient number of times can also be revealed. Our results suggest that simple randomization techniques are not sufficient to protect steganographic file systems from traffic analysis attacks},
   6130         www_section = {traffic analysis},
   6131         isbn = {978-3-540-77369-6},
   6132         doi = {10.1007/978-3-540-77370-2},
   6133         url = {http://www.springerlink.com/content/h5r4j539833k1k78/},
   6134         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/troncoso-ih2007.pdf},
   6135         author = {Carmela Troncoso and Claudia Diaz and Orr Dunkelman and Bart Preneel}
   6136 }
   6137 @conference {danezis-pet2007,
   6138         title = {Two-Sided Statistical Disclosure Attack},
   6139         booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET 2007)},
   6140         year = {2007},
   6141         month = {June},
   6142         publisher = {Springer},
   6143         organization = {Springer},
   6144         address = {Ottawa, Canada},
   6145         abstract = {We introduce a new traffic analysis attack: the Two-sided Statistical Disclosure Attack, which tries to uncover the receivers of messages sent through an anonymizing network supporting anonymous replies. We provide an abstract model of an anonymity system with users that reply to messages. Based on this model, we propose a linear approximation describing the likely receivers of sent messages. Using simulations, we evaluate the new attack given different traffic characteristics and we show that it is superior to previous attacks when replies are routed in the system},
   6146         www_section = {anonymity, traffic analysis},
   6147         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.78.7347},
   6148         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2007.pdf},
   6149         author = {George Danezis and Claudia Diaz and Carmela Troncoso},
   6150         editor = {Borisov, Nikita and Philippe Golle}
   6151 }
   6152 @conference {2007_13,
   6153         title = {An Unconditionally Secure Protocol for Multi-Party Set Intersection},
   6154         booktitle = {Proceedings of the 5th International Conference on Applied Cryptography and Network Security},
   6155         year = {2007},
   6156         publisher = {Springer-Verlag},
   6157         organization = {Springer-Verlag},
   6158         address = {Berlin, Heidelberg},
   6159         abstract = {Existing protocols for private set intersection are based on homomorphic public-key encryption and the technique of representing sets as polynomials in the cryptographic model. Based on the ideas of these protocols and the two-dimensional verifiable secret sharing scheme, we propose a protocol for private set intersection in the information-theoretic model. By representing the sets as polynomials, the set intersection problem is converted into the task of computing the common roots of the polynomials. By sharing the coefficients of the polynomials among parties, the common roots can be computed using the shares. As long as more than 2n/3 parties are semi-honest, our protocol correctly computes the intersection of n sets, and reveals no other information than what is implied by the intersection and the secret sets controlled by the active adversary. This is the first specific protocol for private set intersection in the information-theoretic model as far as we know},
   6160         www_section = {privacy-preserving set intersection, secure multi-party computation, unconditional security},
   6161         isbn = {978-3-540-72737-8},
   6162         doi = {10.1007/978-3-540-72738-5_15},
   6163         url = {http://dx.doi.org/10.1007/978-3-540-72738-5_15},
   6164         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecureProtocol2007Li.pdf},
   6165         author = {Li, Ronghua and Wu, Chuankun}
   6166 }
   6167 @conference {tor-soups07,
   6168         title = {Usability of anonymous web browsing: an examination of Tor interfaces and deployability},
   6169         booktitle = {Proceedings of the 3rd Symposium on Usable Privacy and Security (SOUPS '07)},
   6170         year = {2007},
   6171         month = {July},
   6172         pages = {41--51},
   6173         publisher = {ACM},
   6174         organization = {ACM},
   6175         address = {New York, NY, USA},
   6176         abstract = {Tor is a popular privacy tool designed to help achieve online anonymity by anonymising web traffic. Employing cognitive walkthrough as the primary method, this paper evaluates four competing methods of deploying Tor clients, and a number of software tools designed to be used in conjunction with Tor: Vidalia, Privoxy, Torbutton, and FoxyProxy. It also considers the standalone anonymous browser TorPark. Our results show that none of the deployment options are fully satisfactory from a usability perspective, but we offer suggestions on how to incorporate the best aspects of each tool. As a framework for our usability evaluation, we also provide a set of guidelines for Tor usability compiled and adapted from existing work on usable security and human-computer interaction},
   6177         www_section = {anonymity, onion routing, privacy, Tor, usable security},
   6178         isbn = {978-1-59593-801-5},
   6179         doi = {10.1145/1280680.1280687},
   6180         url = {http://portal.acm.org/citation.cfm?id=1280680.1280687},
   6181         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tor-soups07.pdf},
   6182         author = {Jeremy Clark and Paul C. van Oorschot and Carlisle Adams}
   6183 }
   6184 @conference {kutzner07linearization,
   6185         title = {Using Linearization for Global Consistency in SSR},
   6186         booktitle = {Proceedings of the 4th Int. IEEE Workshop on Hot Topics in P2P Systems},
   6187         year = {2007},
   6188         type = {publication},
   6189         address = {Long Beach, CA},
   6190         abstract = {Novel routing algorithms such as scalable source routing (SSR) and virtual ring routing (VRR) need to set up and maintain a virtual ring structure among all the nodes in the network. The iterative successor pointer rewiring protocol (ISPRP) is one way to bootstrap such a network. Like its VRR counterpart, ISPRP requires one of the nodes to flood the network to guarantee consistency. Recent results on self-stabilizing algorithms now suggest a new approach to bootstrap the virtual rings of SSR and VRR. This so-called linearization method does not require any flooding at all. Moreover, it has been shown that linearization with shortcut neighbors has, on average, only polylogarithmic convergence time},
   6191         www_section = {scalable source routing},
   6192         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   6193         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner07linearization.pdf},
   6194         author = {Kendy Kutzner and Thomas Fuhrmann}
   6195 }
   6196 @article {1250746,
   6197         title = {Valgrind: a framework for heavyweight dynamic binary instrumentation},
   6198         journal = {SIGPLAN Not},
   6199         volume = {42},
   6200         number = {6},
   6201         year = {2007},
   6202         pages = {89--100},
   6203         publisher = {ACM},
   6204         address = {New York, NY, USA},
   6205         abstract = {Dynamic binary instrumentation (DBI) frameworks make it easy to build dynamic binary analysis (DBA) tools such as checkers and profilers. Much of the focus on DBI frameworks has been on performance; little attention has been paid to their capabilities. As a result, we believe the potential of DBI has not been fully exploited.
   6206 
   6207 In this paper we describe Valgrind, a DBI framework designed for building heavyweight DBA tools. We focus on its unique support for shadow values: a powerful but previously little-studied and difficult-to-implement DBA technique, which requires a tool to shadow every register and memory value with another value that describes it. This support accounts for several crucial design features that distinguish Valgrind from other DBI frameworks. Because of these features, lightweight tools built with Valgrind run comparatively slowly, but Valgrind can be used to build more interesting, heavyweight tools that are difficult or impossible to build with other DBI frameworks such as Pin and DynamoRIO},
   6208         www_section = {dynamic binary instrumentation},
   6209         issn = {0362-1340},
   6210         doi = {10.1145/1273442.1250746},
   6211         url = {http://portal.acm.org/citation.cfm?id=1250746},
   6212         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.4263.pdf},
   6213         author = {Nethercote, Nicholas and Seward, Julian}
   6214 }
   6215 @article {2007_14,
   6216         title = {Vielleicht anonym? Die Enttarnung von StealthNet-Nutzern},
   6217         journal = {c't magazin f{\"u}r computer technik},
   6218         year = {2007},
   6219         type = {Report},
   6220         www_section = {anonymity, file-sharing, Rshare, Stealthnet},
   6221         url = {http://www.heise.de/kiosk/archiv/ct/2007/21/218_Die-Enttarnung-von-StealthNet-Nutzern},
   6222         author = {Nils Durner and Nathan S Evans and Christian Grothoff}
   6223 }
   6224 @conference {garbacki062fast,
   6225         title = {2Fast: Collaborative Downloads in P2P Networks},
   6226         booktitle = {P2P 2006. 6th IEEE International Conference on Peer-to-Peer Computing},
   6227         year = {2006},
   6228         month = sep,
   6229         publisher = {IEEE Computer Society},
   6230         organization = {IEEE Computer Society},
   6231         address = {Cambridge, UK},
   6232         abstract = {P2P systems that rely on the voluntary contribution of bandwidth by the individual peers may suffer from free riding. To address this problem, mechanisms enforcing fairness in bandwidth sharing have been designed, usually by limiting the download bandwidth to the available upload bandwidth. As in real environments the latter is much smaller than the former, these mechanisms severely affect the download performance of most peers. In this paper we propose a system called 2Fast, which solves this problem while preserving the fairness of bandwidth sharing. In 2Fast, we form groups of peers that collaborate in downloading a file on behalf of a single group member, which can thus use its full download bandwidth. A peer in our system can use its currently idle bandwidth to help other peers in their ongoing downloads, and get in return help during its own downloads. We assess the performance of 2Fast analytically and experimentally, the latter in both real and simulated environments. We find that in realistic bandwidth limit settings, 2Fast improves the download speed by up to a factor of 3.5 in comparison to state-of-the-art P2P download protocols},
   6233         www_section = {2fast, bandwidth sharing, collaborative download, free-riding, P2P, p2p network, peer-to-peer networking},
   6234         isbn = {0-7695-2679-9},
   6235         doi = {10.1109/P2P.2006.1},
   6236         url = {http://www.arnetminer.org/viewpub.do?pid=525534},
   6237         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Computer\%20Society\%20-\%202Fast.pdf},
   6238         author = {Garbacki, Pawel and Alexandru Iosup and Epema, Dick H. J. and van Steen, Maarten}
   6239 }
   6240 @mastersthesis {2006_0,
   6241         title = {Access Control in Peer-to-Peer Storage Systems},
   6242         volume = {Communication Systems},
   6243         year = {2006},
   6244         month = oct,
   6245         pages = {0--159},
   6246         school = {Eidgen{\"o}ssische Technische Hochschule Z{\"u}rich (ETH)},
   6247         type = {Master's Thesis},
   6248         address = {Zurich, Switzerland},
   6249         www_section = {access control, peer-to-peer storage system},
   6250         url = {http://webcache.googleusercontent.com/u/ethweb?oe=utf8\&GO.x=0\&GO.y=0\&hl=es\&q=cache:7sJLnyzj1TcJ:http://www.zisc.ethz.ch/events/ISC20067Slides/MA_Report_Erol_Koc.pdf+Erol+Ko\%C3\%A7\&ct=clnk},
   6251         author = {Erol Ko{\c c}}
   6252 }
   6268 @article {Kumar2006Algorithms,
   6269         title = {Algorithms to accelerate multiple regular expressions matching for deep packet inspection},
   6270         journal = {SIGCOMM Comput. Commun. Rev},
   6271         volume = {36},
   6272         number = {4},
   6273         year = {2006},
   6274         pages = {339--350},
   6275         publisher = {ACM},
   6276         address = {New York, NY, USA},
   6277         www_section = {deep packet inspection, DFA, regular expressions},
   6278         issn = {0146-4833},
   6279         doi = {10.1145/1151659.1159952},
   6280         url = {http://doi.acm.org/10.1145/1151659.1159952},
   6281         author = {Kumar, Sailesh and Dharmapurikar, Sarang and Yu, Fang and Crowley, Patrick and Turner, Jonathan}
   6282 }
   6283 @conference {usability:weis2006,
   6284         title = {Anonymity Loves Company: Usability and the Network Effect},
   6285         booktitle = {Proceedings of the Fifth Workshop on the Economics of Information Security (WEIS 2006)},
   6286         year = {2006},
   6287         month = {June},
   6288         address = {Cambridge, UK},
   6289         abstract = {A growing field of literature is studying how usability impacts security [4]. One class of security software is anonymizing networks---overlay networks on the Internet that provide privacy by letting users transact (for example, fetch a web page or send an email) without revealing their communication partners.
   6290 In this position paper we focus on the network effects of usability on privacy and security: usability is a factor as before, but the size of the user base also becomes a factor. We show that in anonymizing networks, even if you were smart enough and had enough time to use every system perfectly, you would nevertheless be right to choose your system based in part on its usability for other users},
   6291         www_section = {anonymity, privacy},
   6292         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.510},
   6293         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/usability-weis2006.pdf},
   6294         author = {Roger Dingledine and Nick Mathewson},
   6295         editor = {Ross Anderson}
   6296 }
   6297 @article {chatzikokolakis2006apn,
   6298         title = {Anonymity Protocols as Noisy Channels?},
   6299         journal = {Proc. 2nd Symposium on Trustworthy Global Computing, LNCS. Springer},
   6300         volume = {4661/2007},
   6301         year = {2006},
   6302         pages = {281--300},
   6303         abstract = {We propose a framework in which anonymity protocols are interpreted as particular kinds of channels, and the degree of anonymity provided by the protocol as the converse of the channel's capacity. We also investigate how the adversary can test the system to try to infer the user's identity, and we study how his probability of success depends on the characteristics of the channel. We then illustrate how various notions of anonymity can be expressed in this framework, and show the relation with some definitions of probabilistic anonymity in the literature.
   6304 This work has been partially supported by the INRIA DREI {\'E}quipe Associ{\'e}e PRINTEMPS. The work of Konstantinos Chatzikokolakis and Catuscia Palamidessi has been also supported by the INRIA ARC project ProNoBiS},
   6305         www_section = {anonymity},
   6306         isbn = {978-3-540-75333-9},
   6307         issn = {0302-9743},
   6308         doi = {10.1007/978-3-540-75336-0},
   6309         url = {http://www.springerlink.com/content/04247873k1719274/},
   6310         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.79.4460.pdf},
   6311         author = {Konstantinos Chatzikokolakis and Catuscia Palamidessi and Prakash Panangaden}
   6312 }
   6313 @conference {Goyal:2006:AEF:1180405.1180418,
   6314         title = {Attribute-based encryption for fine-grained access control of encrypted data},
   6315         booktitle = {CCS'06--Proceedings of the 13th ACM Conference on Computer and Communications Security},
   6316         series = {CCS '06},
   6317         year = {2006},
   6318         month = oct,
   6319         pages = {89--98},
   6320         publisher = {ACM},
   6321         organization = {ACM},
   6322         address = {Alexandria, VA, USA},
   6323         abstract = {As more sensitive data is shared and stored by third-party sites on the Internet, there will be a need to encrypt data stored at these sites. One drawback of encrypting data is that it can be selectively shared only at a coarse-grained level (i.e., giving another party your private key). We develop a new cryptosystem for fine-grained sharing of encrypted data that we call Key-Policy Attribute-Based Encryption (KP-ABE). In our cryptosystem, ciphertexts are labeled with sets of attributes and private keys are associated with access structures that control which ciphertexts a user is able to decrypt. We demonstrate the applicability of our construction to sharing of audit-log information and broadcast encryption. Our construction supports delegation of private keys which subsumes Hierarchical Identity-Based Encryption (HIBE)},
   6324         www_section = {access control, attribute-based encryption, audit logs, broadcast encryption, delegation, hierarchical identity-based encryption},
   6325         isbn = {1-59593-518-5},
   6326         doi = {http://doi.acm.org/10.1145/1180405.1180418},
   6327         url = {http://doi.acm.org/10.1145/1180405.1180418},
   6328         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2706\%20-\%20Attributed-based\%20encryption\%20for\%20fine-grained\%20access\%20control\%20of\%20encrypted\%20data.pdf},
   6329         author = {Goyal, Vipul and Pandey, Omkant and Amit Sahai and Waters, Brent}
   6330 }
   6331 @conference {alpha-mixing:pet2006,
   6332         title = {Blending Different Latency Traffic with Alpha-Mixing},
   6333         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   6334         year = {2006},
   6335         month = {June},
   6336         pages = {245--257},
   6337         publisher = {Springer},
   6338         organization = {Springer},
   6339         address = {Cambridge, UK},
   6340         abstract = {Currently fielded anonymous communication systems either introduce too much delay and thus have few users and little security, or have many users but too little delay to provide protection against large attackers. By combining the user bases into the same network, and ensuring that all traffic is mixed together, we hope to lower delay and improve anonymity for both sets of users.
   6341 Alpha-mixing is an approach that can be added to traditional batching strategies to let senders specify for each message whether they prefer security or speed. Here we describe how to add alpha-mixing to various mix designs, and show that mix networks with this feature can provide increased anonymity for all senders in the network. Along the way we encounter subtle issues to do with the attacker's knowledge of the security parameters of the users},
   6342         www_section = {anonymity},
   6343         isbn = {978-3-540-68790-0},
   6344         doi = {10.1007/11957454},
   6345         url = {http://www.springerlink.com/content/m23510526727k317/},
   6346         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/alpha-mixing-pet2006.pdf},
   6347         author = {Roger Dingledine and Andrei Serjantov and Paul Syverson},
   6348         editor = {George Danezis and Philippe Golle}
   6349 }
   6350 @conference {cramer06bootstrapping,
   6351         title = {Bootstrapping Chord in Ad hoc Networks: Not Going Anywhere for a While},
   6352         booktitle = {Proceedings of the 3rd IEEE International Workshop on Mobile Peer-to-Peer Computing},
   6353         year = {2006},
   6354         type = {publication},
   6355         address = {Pisa, Italy},
   6356         abstract = {With the growing prevalence of wireless devices, infrastructure-less ad hoc networking is coming closer to reality. Research in this field has mainly been concerned with routing. However, to justify the relevance of ad hoc networks, there have to be applications. Distributed applications require basic services such as naming. In an ad hoc network, these services have to be provided in a decentralized way. We believe that structured peer-to-peer overlays are a good basis for their design. Prior work has been focused on the long-run performance of virtual peer-to-peer overlays over ad hoc networks. In this paper, we consider a vital functionality of any peer-to-peer network: bootstrapping. We formally show that the self-configuration process of a spontaneously deployed Chord network has a time complexity linear in the network size. In addition to that, its centralized bootstrapping procedure causes an unfavorable traffic load imbalance},
   6357         www_section = {ad-hoc networks, overlay networks, traffic analysis},
   6358         doi = {10.1109/PERCOMW.2006.28},
   6359         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   6360         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer06bootstrapping.pdf},
   6361         author = {Cramer, Curt and Thomas Fuhrmann}
   6362 }
   6363 @conference {UREbreak06,
   6364         title = {Breaking Four Mix-related Schemes Based on Universal Re-encryption},
   6365         booktitle = {Proceedings of Information Security Conference 2006},
   6366         year = {2006},
   6367         month = {September},
   6368         publisher = {Springer-Verlag},
   6369         organization = {Springer-Verlag},
   6370         abstract = {Universal Re-encryption allows El-Gamal ciphertexts to be re-encrypted without knowledge of their corresponding public keys. This has made it an enticing building block for anonymous communications protocols. In this work we analyze four schemes related to mix networks that make use of Universal Re-encryption and find serious weaknesses in all of them. Universal Re-encryption of signatures is open to existential forgery; two-mix schemes can be fully compromised by a passive adversary observing a single message close to the sender; the fourth scheme, the rWonGoo anonymous channel, turns out to be less secure than the original Crowds scheme, on which it is based. Our attacks make extensive use of unintended {\textquotedblleft}services{\textquotedblright} provided by the network nodes acting as decryption and re-routing oracles. Finally, our attacks against rWonGoo demonstrate that anonymous channels are not automatically composable: using two of them in a careless manner makes the system more vulnerable to attack},
   6371         www_section = {traffic analysis, universal re-encryption},
   6372         doi = {10.1007/s10207-007-0033-y},
   6373         url = {http://www.springerlink.com/content/x038u85171776236/},
   6374         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UREbreak06.pdf},
   6375         author = {George Danezis}
   6376 }
   6377 @conference {morphmix:pet2006,
   6378         title = {Breaking the Collusion Detection Mechanism of MorphMix},
   6379         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   6380         year = {2006},
   6381         month = {June},
   6382         pages = {368--384},
   6383         publisher = {Springer},
   6384         organization = {Springer},
   6385         address = {Cambridge, UK},
   6386         abstract = {MorphMix is a peer-to-peer circuit-based mix network designed to provide low-latency anonymous communication. MorphMix nodes incrementally construct anonymous communication tunnels based on recommendations from other nodes in the system; this P2P approach allows it to scale to millions of users. However, by allowing unknown peers to aid in tunnel construction, MorphMix is vulnerable to colluding attackers that only offer other attacking nodes in their recommendations. To avoid building corrupt tunnels, MorphMix employs a collusion detection mechanism to identify this type of misbehavior. In this paper, we challenge the assumptions of the collusion detection mechanism and demonstrate that colluding adversaries can compromise a significant fraction of all anonymous tunnels, and in some cases, a majority of all tunnels built. Our results suggest that mechanisms based solely on a node's local knowledge of the network are not sufficient to solve the difficult problem of detecting colluding adversarial behavior in a P2P system and that more sophisticated schemes may be needed},
   6387         www_section = {collusion detection, P2P},
   6388         isbn = {978-3-540-68790-0},
   6389         doi = {10.1007/11957454},
   6390         url = {http://www.springerlink.com/content/p2612108665331n7/},
   6391         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-pet2006.pdf},
   6392         author = {Parisa Tabriz and Borisov, Nikita},
   6393         editor = {George Danezis and Philippe Golle}
   6394 }
   6395 @article {1159937,
   6396         title = {Building an AS-topology model that captures route diversity},
   6397         journal = {SIGCOMM Comput. Commun. Rev},
   6398         volume = {36},
   6399         number = {4},
   6400         year = {2006},
   6401         pages = {195--206},
   6402         publisher = {ACM},
   6403         address = {New York, NY, USA},
   6404         abstract = {An understanding of the topological structure of the Internet is needed for quite a number of networking tasks, e.g., making decisions about peering relationships, choice of upstream providers, inter-domain traffic engineering. One essential component of these tasks is the ability to predict routes in the Internet. However, the Internet is composed of a large number of independent autonomous systems (ASes) resulting in complex interactions, and until now no model of the Internet has succeeded in producing predictions of acceptable accuracy. We demonstrate that there are two limitations of prior models: (i) they have all assumed that an Autonomous System (AS) is an atomic structure--it is not, and (ii) models have tended to oversimplify the relationships between ASes. Our approach uses multiple quasi-routers to capture route diversity within the ASes, and is deliberately agnostic regarding the types of relationships between ASes. The resulting model ensures that its routing is consistent with the observed routes. Exploiting a large number of observation points, we show that our model provides accurate predictions for unobserved routes, a first step towards developing structural models of the Internet that enable real applications},
   6405         www_section = {border gateway protocol, inter-domain routing, route diversity, routing},
   6406         issn = {0146-4833},
   6407         doi = {10.1145/1151659.1159937},
   6408         url = {http://portal.acm.org/citation.cfm?id=1159937$\#$},
   6409         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BuildingAnASTopologyModel.pdf},
   6410         author = {M{\"u}hlbauer, Wolfgang and Feldmann, Anja and Maennel, Olaf and Roughan, Matthew and Uhlig, Steve}
   6411 }
   6412 @conference {thiele06debruijn,
   6413         title = {Churn Resistant de Bruijn Networks for Wireless on Demand Systems},
   6414         booktitle = {Proceedings of the Third Annual Conference on Wireless On demand Network Systems and Services},
   6415         year = {2006},
   6416         type = {publication},
   6417         address = {Les M{\'e}nuires, France},
   6418         abstract = {Wireless on demand systems typically need authentication, authorization and accounting (AAA) services. In a peer-to-peer (P2P) environment these AAA-services need to be provided in a fully decentralized manner. This excludes many cryptographic approaches since they need and rely on a central trusted instance. One way to accomplish AAA in a P2P manner is de Bruijn networks, since there data can be routed over multiple non-overlapping paths, thereby hindering malicious nodes from manipulating that data. Originally, de Bruijn networks required a rather fixed network structure which made them unsuitable for wireless networks. In this paper we generalize de Bruijn networks to an arbitrary number of nodes while keeping all their desired properties. This is achieved by decoupling link degree and character set of the native de Bruijn graph. Furthermore, we describe how this makes the resulting network resistant against node churn},
   6419         www_section = {authentication, P2P},
   6420         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   6421         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thiele06debruijn.pdf},
   6422         author = {Manuel Thiele and Kendy Kutzner and Thomas Fuhrmann}
   6423 }
   6424 @article {adams06,
   6425         title = {A Classification for Privacy Techniques},
   6426         journal = {University of Ottawa Law \& Technology Journal},
   6427         volume = {3},
   6428         year = {2006},
   6429         pages = {35--52},
   6430         abstract = {This paper proposes a classification for techniques that encourage, preserve, or enhance privacy in online environments. This classification encompasses both automated mechanisms (those that exclusively or primarily use computers and software to implement privacy techniques) and nonautomated mechanisms (those that exclusively or primarily use human means to implement privacy techniques). We give examples of various techniques and show where they fit within this classification. The importance of such a classification is discussed along with its use as a tool for the comparison and evaluation of privacy techniques},
   6431         www_section = {privacy},
   6432         url = {http://papers.ssrn.com/sol3/papers.cfm?abstract_id=999672},
   6433         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/adams06.pdf},
   6434         author = {Carlisle Adams}
   6435 }
   6436 @conference {2006_1,
   6437         title = {Combating Hidden Action in Unstructured Peer-to-Peer Systems},
   6438         booktitle = {ChinaCom '06. First International Conference on Communications and Networking in China},
   6439         year = {2006},
   6440         month = oct,
   6441         pages = {1--5},
   6442         publisher = {IEEE Computer Society},
   6443         organization = {IEEE Computer Society},
   6444         address = {Beijing, China},
   6445         abstract = {In unstructured peer-to-peer systems, cooperation by the intermediate peers is essential for the success of queries. However, intermediate peers may choose to forward packets at a low priority or not forward the packets at all, which is referred to as peers' hidden action. Hidden action may lead to a significant decrease in search efficiency. In contrast to building a global system with reputations or economics, we propose MSSF, an improved search method, to help queries route around the peers with hidden action. MSSF does not need to check other peers' behavior. It automatically adapts query routes according to previous query results. Simulation results show that MSSF is more robust than Gnutella flooding when peers with hidden action increase},
   6446         www_section = {cooperation, hidden action, unstructured peer-to-peer system},
   6447         isbn = {1-4244-0463-0 },
   6448         doi = {http://dx.doi.org/10.1109/CHINACOM.2006.344762},
   6449         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChinaCom\%2706\%20-\%20Combating\%20hidden\%20action\%20in\%20unstructured\%20p2p\%20systems.pdf},
   6450         author = {Qi Zhao and Jianzhong Zhang and Jingdong Xu}
   6451 }
   6452 @book {2006_2,
   6453         title = {Combinatorial Auctions},
   6454         year = {2006},
   6455         pages = {0--649},
   6456         publisher = {MIT Press},
   6457         organization = {MIT Press},
   6458         address = {Cambridge, MA},
   6459         abstract = {The study of combinatorial auctions -- auctions in which bidders can bid on combinations of items or "packages" -- draws on the disciplines of economics, operations research, and computer science. This landmark collection integrates these three perspectives, offering a state-of-the-art survey of developments in combinatorial auction theory and practice by leaders in the field. Combinatorial auctions (CAs), by allowing bidders to express their preferences more fully, can lead to improved economic efficiency and greater auction revenues. However, challenges arise in both design and implementation. Combinatorial Auctions addresses each of these challenges. After describing and analyzing various CA mechanisms, the book addresses bidding languages and questions of efficiency. Possible strategies for solving the computationally intractable problem of how to compute the objective-maximizing allocation (known as the winner determination problem) are considered, as are questions of how to test alternative algorithms. The book discusses five important applications of CAs: spectrum auctions, airport takeoff and landing slots, procurement of freight transportation services, the London bus routes market, and industrial procurement. This unique collection makes recent work in CAs available to a broad audience of researchers and practitioners. The integration of work from the three disciplines underlying CAs, using a common language throughout, serves to advance the field in theory and practice},
   6460         www_section = {combinatorial auctions, winner determination problem},
   6461         isbn = {0262033429},
   6462         issn = {978-0262033428},
   6463         url = {http://works.bepress.com/cramton/35},
   6464         author = {Peter Cramton and Yoav Shoham and Richard Steinberg}
   6465 }
   6466 @book {2006_3,
   6467         title = {Combining Virtual and Physical Structures for Self-organized Routing},
   6468         booktitle = {Self-Organizing Systems},
   6469         series = {Lecture Notes in Computer Science},
   6470         volume = {4124},
   6471         year = {2006},
   6472         abstract = {Our recently proposed scalable source routing (SSR) protocol combines source routing in the physical network with Chord-like routing in the virtual ring that is formed by the address space. Thereby, SSR provides self-organized routing in large unstructured networks of resource-limited devices. Its ability to quickly adapt to changes in the network topology makes it suitable not only for sensor-actuator networks but also for mobile ad-hoc networks. Moreover, SSR directly provides the key-based routing semantics, thereby making it an efficient basis for the scalable implementation of self-organizing, fully decentralized applications.
   6473 In this paper we review SSR's self-organizing features and demonstrate how the combination of virtual and physical structures leads to emergence of stability and efficiency. In particular, we focus on SSR's resistance against node churn. Following the principle of combining virtual and physical structures, we propose an extension that stabilizes SSR in face of heavy node churn. Simulations demonstrate the effectiveness of this extension},
   6474         www_section = {Chord, scalable source routing, self-organization},
   6475         isbn = {978-3-540-37658-3},
   6476         doi = {10.1007/11822035},
   6477         url = {http://www.springerlink.com/content/4540535t4v2g2548/},
   6478         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Combining\%20Virtual\%20and\%20Physical\%20Structures\%20for\%20Self-organized\%20Routing_0.pdf},
   6479         publisher = {unknown},
   6480         author = {Thomas Fuhrmann}
   6481 }
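% A toy Python sketch of the greedy virtual-ring step that Chord-like routing
% (and hence SSR's virtual structure) relies on: forward to the known address
% that is clockwise-closest to the destination. SSR's combination with
% physical source routes is not modelled; the ring size and names are
% illustrative assumptions.
%
%   RING = 2 ** 32  # size of the virtual address space (assumed)
%
%   def ring_distance(a, b):
%       """Clockwise distance from a to b on the virtual ring."""
%       return (b - a) % RING
%
%   def next_hop(own_address, neighbour_addresses, destination):
%       """Greedy step: the candidate clockwise-closest to the destination."""
%       candidates = [own_address] + list(neighbour_addresses)
%       return min(candidates, key=lambda addr: ring_distance(addr, destination))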
   6482 @article {2006_4,
   6483         title = {On the fundamental communication abstraction supplied by P2P overlay networks},
   6484         year = {2006},
   6485         abstract = {The disruptive advent of peer-to-peer (P2P) file sharing in 2000 attracted significant interest. P2P networks have matured from their initial form, unstructured overlays, to structured overlays like distributed hash tables (DHTs), which are considered state-of-the-art. There are huge efforts to improve their performance. Various P2P applications like distributed storage and application-layer multicast were proposed. However, little effort has been spent on understanding the communication abstraction P2P overlays supply. Only once this abstraction is understood will the reach of P2P ideas broaden significantly. Furthermore, this clarification reveals novel approaches and highlights future directions. In this paper, we reconsider well-known P2P overlays, linking them to insights from distributed systems research. We conclude that the main communication abstraction is that of a virtual address space or application-specific naming. On this basis, P2P systems build a functional layer implementing, for example, lookup, indirection and distributed processing. Our insights led us to identify interesting and unexplored points in the design space},
   6486         www_section = {distributed hash table, P2P},
   6487         journal = {unknown},
   6488         url = {http://www3.interscience.wiley.com/journal/109858517/abstract},
   6489         author = {Curt Cramer and Thomas Fuhrmann}
   6490 }
   6491 @conference {1267366,
   6492         title = {Compare-by-hash: a reasoned analysis},
   6493         booktitle = {ATEC '06: Proceedings of the annual conference on USENIX '06 Annual Technical Conference},
   6494         year = {2006},
   6495         pages = {7--7},
   6496         publisher = {USENIX Association},
   6497         organization = {USENIX Association},
   6498         address = {Berkeley, CA, USA},
   6499         abstract = {Compare-by-hash is the now-common practice used by systems designers who assume that when the digest of a cryptographic hash function is equal on two distinct files, then those files are identical. This approach has been used in both real projects and in research efforts (for example rsync [16] and LBFS [12]). A recent paper by Henson criticized this practice [8]. The present paper revisits the topic from an advocate's standpoint: we claim that compare-by-hash is completely reasonable, and we offer various arguments in support of this viewpoint in addition to addressing concerns raised by Henson},
   6500         url = {http://portal.acm.org/citation.cfm?id=1267366$\#$},
   6501         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.125.4474.pdf},
   6502         author = {Black, John}
   6503 }
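% A minimal Python illustration of the compare-by-hash practice the paper
% discusses: two files are treated as identical when their cryptographic
% digests agree, avoiding a byte-by-byte comparison. SHA-256 is used here as
% an example digest; the function names are illustrative.
%
%   import hashlib
%
%   def file_digest(path):
%       h = hashlib.sha256()
%       with open(path, "rb") as f:
%           for chunk in iter(lambda: f.read(65536), b""):
%               h.update(chunk)
%       return h.digest()
%
%   def same_by_hash(path_a, path_b):
%       # Equal digests are taken to imply equal contents.
%       return file_digest(path_a) == file_digest(path_b)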
   6504 @article {2006_5,
   6505         title = {Complementary currency innovations: Self-guarantee in peer-to-peer currencies},
   6506         journal = {International Journal of Community Currency Research},
   6507         volume = {10},
   6508         year = {2006},
   6509         month = jan,
   6510         pages = {1--7},
   6511         abstract = {The WAT system, as used in Japan, allows for businesses to issue their own tickets (IOU's) which can circulate as a complementary currency within a community. This paper proposes a variation on that model, where the issuer of a ticket can offer a guarantee, in the form of some goods or services. The difference in value, along with a reasonable acceptance that the issuer is capable of delivering the service or goods, allows for a higher degree of confidence in the ticket, and therefore a greater liquidity},
   6512         www_section = {guarantee, peer-to-peer currencies},
   6513         issn = {1325-9547},
   6514         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IJCCR\%20vol\%2010\%20\%282006\%29\%201\%20Ardron\%20and\%20Lietaer.pdf},
   6515         author = {Mitra Ardron and Bernard Lietaer}
   6516 }
   6517 @article {ishai2006ca,
   6518         title = {Cryptography from Anonymity},
   6519         journal = {Proceedings of the 47th Annual IEEE Symposium on Foundations of Computer Science (FOCS'06)},
   6520         year = {2006},
   6521         pages = {239--248},
   6522         publisher = {IEEE Computer Society Washington, DC, USA},
   6523         abstract = {There is a vast body of work on implementing anonymous communication. In this paper, we study the possibility of using anonymous communication as a building block, and show that one can leverage anonymity in a variety of cryptographic contexts. Our results go in two directions. Feasibility: We show that anonymous communication over insecure channels can be used to implement unconditionally secure point-to-point channels, broadcast, and general multi-party protocols that remain unconditionally secure as long as less than half of the players are maliciously corrupted. Efficiency: We show that anonymous channels can yield substantial efficiency improvements for several natural secure computation tasks. In particular, we present the first solution to the problem of private information retrieval (PIR) which can handle multiple users while being close to optimal with respect to both communication and computation. A key observation that underlies these results is that local randomization of inputs, via secret-sharing, when combined with the global mixing of the shares, provided by anonymity, allows one to carry out useful computations on the inputs while keeping the inputs private},
   6524         www_section = {anonymity, private information retrieval},
   6525         isbn = {0-7695-2720-5},
   6526         issn = {0272-5428},
   6527         doi = {10.1109/FOCS.2006.25},
   6528         url = {http://portal.acm.org/citation.cfm?id=1170505},
   6529         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ishai2006ca.pdf},
   6530         author = {Yuval Ishai and Eyal Kushilevitz and Rafail Ostrovsky and Amit Sahai}
   6531 }
   6532 @conference {Grolimund:2006:CFT:1173705.1174355,
   6533         title = {Cryptree: A Folder Tree Structure for Cryptographic File Systems},
   6534         booktitle = {SRDS'06--Proceedings of the 25th IEEE Symposium on Reliable Distributed Systems},
   6535         year = {2006},
   6536         month = oct,
   6537         pages = {189--198},
   6538         publisher = {IEEE Computer Society},
   6539         organization = {IEEE Computer Society},
   6540         address = {Leeds, UK},
   6541         abstract = {We present Cryptree, a cryptographic tree structure which facilitates access control in file systems operating on untrusted storage. Cryptree leverages the file system's folder hierarchy to achieve efficient and intuitive, yet simple, access control. The highlights are its ability to recursively grant access to a folder and all its subfolders in constant time, the dynamic inheritance of access rights which inherently prevents scattering of access rights, and the possibility to grant someone access to a file or folder without revealing the identities of other accessors. To reason about and to visualize Cryptree, we introduce the notion of cryptographic links. We describe the Cryptrees we have used to enforce read and write access in our own file system. Finally, we measure the performance of the Cryptree and compare it to other approaches},
   6542         www_section = {cryptographic tree structure, cryptree, hierarchy, untrusted storage},
   6543         isbn = {0-7695-2677-2},
   6544         doi = {http://dx.doi.org/10.1109/SRDS.2006.15},
   6545         url = {http://dl.acm.org/citation.cfm?id=1173705.1174355},
   6546         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRDS\%2706\%20-\%20Cryptree.pdf},
   6547         author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}
   6548 }
   6549 @conference {2006_6,
   6550         title = {Curve25519: new Diffie-Hellman speed records},
   6551         booktitle = {Public Key Cryptography--PKC 2006},
   6552         year = {2006},
   6553         month = feb,
   6554         www_section = {Curve25519, ECC, ECDH, GNUnet},
   6555         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/curve25519-20060209.pdf},
   6556         author = {Daniel J. Bernstein}
   6557 }
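% A usage illustration (not Bernstein's code) of the Diffie-Hellman function
% this paper introduces, via the X25519 interface of the third-party Python
% "cryptography" package, which is assumed to be installed.
%
%   from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
%
%   alice_private = X25519PrivateKey.generate()
%   bob_private = X25519PrivateKey.generate()
%
%   # Each side combines its own private key with the other's public key.
%   alice_shared = alice_private.exchange(bob_private.public_key())
%   bob_shared = bob_private.exchange(alice_private.public_key())
%
%   assert alice_shared == bob_shared  # both derive the same 32-byte secret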
   6558 @booklet {200,
   6559         title = {Defending the Sybil Attack in P2P Networks: Taxonomy, Challenges, and a Proposal for Self-Registration},
   6560         journal = {DAS-P2P 2006},
   6561         year = {2006},
   6562         month = {April},
   6563         publisher = {Institut f{\"u}r Telematik, Universit{\"a}t Karlsruhe (TH), Germany},
   6564         abstract = {The robustness of Peer-to-Peer (P2P) networks, in particular of DHT-based overlay networks, suffers significantly when a Sybil attack is performed. We tackle the issue of Sybil attacks from two sides. First, we clarify, analyze, and classify the P2P identifier assignment process. By clearly separating network participants from network nodes, two challenges of P2P networks under a Sybil attack become obvious: i) stability over time, and ii) identity differentiation. Second, as a starting point for a quantitative analysis of time-stability of P2P networks under Sybil attacks and under some assumptions with respect to identity differentiation, we propose an identity registration procedure called self-registration that makes use of the inherent distribution mechanisms of a P2P network},
   6565         www_section = {attack, P2P, robustness},
   6566         url = {http://dsn.tm.uni-karlsruhe.de/medien/publication-confs/dinger_dasp2p06_sybil.pdf},
   6567         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.8756.pdf},
   6568         author = {Jochen Dinger and Hannes Hartenstein}
   6569 }
   6570 @book {2006_7,
   6571         title = {Designing Economic Mechanisms},
   6572         year = {2006},
   6573         publisher = {Cambridge University Press},
   6574         organization = {Cambridge University Press},
   6575         address = {Cambridge, U.K},
   6576         abstract = {A mechanism is a mathematical structure that models institutions through which economic activity is guided and coordinated. There are many such institutions; markets are the most familiar ones. Lawmakers, administrators and officers of private companies create institutions in order to achieve desired goals. They seek to do so in ways that economize on the resources needed to operate the institutions, and that provide incentives that induce the required behaviors. This book presents systematic procedures for designing mechanisms that achieve specified performance, and economize on the resources required to operate the mechanism. The systematic design procedures are algorithms for designing informationally efficient mechanisms. Most of the book deals with these procedures of design. When there are finitely many environments to be dealt with, and there is a Nash-implementing mechanism, our algorithms can be used to make that mechanism into an informationally efficient one. Informationally efficient dominant strategy implementation is also studied. Leonid Hurwicz, along with colleagues Eric Maskin and Roger Myerson, won the 2007 Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel for his work on the effectiveness of markets},
   6577         www_section = {algorithms, Complexity, Computational Geometry, Computer Algebra, Economics: general interest},
   6578         isbn = {9780521836418 },
   6579         doi = {http://dx.doi.org/10.1017/CBO9780511754258},
   6580         author = {Leonid Hurwicz and Stanley Reiter}
   6581 }
   6582 @conference {Golle:sp2006,
   6583         title = {Deterring Voluntary Trace Disclosure in Re-encryption Mix Networks},
   6584         booktitle = {Proceedings of the 2006 IEEE Symposium on Security and Privacy},
   6585         year = {2006},
   6586         month = {May},
   6587         pages = {121--131},
   6588         publisher = {IEEE CS},
   6589         organization = {IEEE CS},
   6590         address = {Oakland, CA},
   6591         abstract = {Mix-networks, a family of anonymous messaging protocols, have been engineered to withstand a wide range of theoretical internal and external adversaries. An undetectable insider threat{\textemdash}voluntary partial trace disclosures by server administrators{\textemdash}remains a troubling source of vulnerability. An administrator's cooperation could be the result of coercion, bribery, or a simple change of interests. While eliminating this insider threat is impossible, it is feasible to deter such unauthorized disclosures by bundling them with additional penalties. We abstract these costs with collateral keys, which grant access to customizable resources. This article introduces the notion of trace-deterring mix-networks, which encode collateral keys for every server-node into every end-to-end message trace. The network reveals no keying material when the input-to-output transitions of individual servers remain secret. Two permutation strategies for encoding key information into traces, mix-and-flip and all-or-nothing, are presented. We analyze their trade-offs with respect to computational efficiency, anonymity sets, and colluding message senders. Our techniques have sufficiently low overhead for deployment in large-scale elections, thereby providing a sort of publicly verifiable privacy guarantee},
   6592         www_section = {anonymity measurement, privacy, re-encryption},
   6593         doi = {10.1145/1698750.1698758},
   6594         url = {http://portal.acm.org/citation.cfm?id=1698750.1698758},
   6595         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Golle-sp2006.pdf},
   6596         author = {Philippe Golle and XiaoFeng Wang and Jakobsson, Markus and Alex Tsow}
   6597 }
   6598 @book {2006_8,
   6599         title = {Differential Privacy},
   6600         booktitle = {Automata, Languages and Programming},
   6601         series = {Lecture Notes in Computer Science},
   6602         volume = {4052},
   6603         year = {2006},
   6604         pages = {1--12},
   6605         publisher = {Springer Berlin Heidelberg},
   6606         organization = {Springer Berlin Heidelberg},
   6607         abstract = {In 1977 Dalenius articulated a desideratum for statistical databases: nothing about an individual should be learnable from the database that cannot be learned without access to the database. We give a general impossibility result showing that a formalization of Dalenius' goal along the lines of semantic security cannot be achieved. Contrary to intuition, a variant of the result threatens the privacy even of someone not in the database. This state of affairs suggests a new measure, differential privacy, which, intuitively, captures the increased risk to one's privacy incurred by participating in a database. The techniques developed in a sequence of papers [8, 13, 3], culminating in those described in [12], can achieve any desired level of privacy under this measure. In many cases, extremely accurate information about the database can be provided while simultaneously ensuring very high levels of privacy},
   6608         isbn = {978-3-540-35907-4},
   6609         doi = {10.1007/11787006_1},
   6610         url = {http://dx.doi.org/10.1007/11787006_1},
   6611         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2006Dwork_0.pdf},
   6612         author = {Dwork, Cynthia},
   6613         editor = {Bugliesi, Michele and Preneel, Bart and Sassone, Vladimiro and Wegener, Ingo}
   6614 }
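% A minimal Python sketch of the Laplace mechanism, the standard construction
% (from the papers cited in this abstract) for answering a numeric query with
% epsilon-differential privacy: add noise scaled to sensitivity/epsilon. The
% sketch assumes a counting query of sensitivity 1; names are illustrative.
%
%   import random
%
%   def laplace_sample(scale):
%       # The difference of two exponentials is Laplace-distributed.
%       return random.expovariate(1.0 / scale) - random.expovariate(1.0 / scale)
%
%   def dp_count(true_count, epsilon):
%       # A count changes by at most 1 when one record is added or removed,
%       # so Laplace noise of scale 1/epsilon suffices.
%       return true_count + laplace_sample(1.0 / epsilon)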
   6615 @conference {1143821,
   6616         title = {A distributed data caching framework for mobile ad hoc networks},
   6617         booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless communications and mobile computing},
   6618         year = {2006},
   6619         pages = {1357--1362},
   6620         publisher = {ACM},
   6621         organization = {ACM},
   6622         address = {New York, NY, USA},
   6623         abstract = {Mobile ad hoc networks (MANETs), enabling multi-hop communication between mobile nodes, are characterized by variable network topology and the demand for efficient dynamic routing protocols. MANETs need no stationary infrastructure or preconstructed base station to coordinate packet transmissions or to advertise information of network topology for mobile nodes. The objective of this paper is to provide MANETs with a distributed data caching framework, which could cache the repetition of data and data path, shorten routes and time span to access data, and enhance data reusable rate to further reduce the use of bandwidth and the consumption of power},
   6624         www_section = {mobile Ad-hoc networks},
   6625         isbn = {1-59593-306-9},
   6626         doi = {10.1145/1143549.1143821},
   6627         url = {http://portal.acm.org/citation.cfm?id=1143821$\#$},
   6628         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.426.pdf},
   6629         author = {Wang, Ying-Hong and Chao, Chih-Feng and Lin, Shih-Wei and Chen, Wei-Ting}
   6630 }
   6631 @mastersthesis {2006_9,
   6632         title = {Distributed k-ary System: Algorithms for Distributed Hash Tables},
   6633         year = {2006},
   6634         month = dec,
   6635         pages = {0--209},
   6636         school = {KTH/Royal Institute of Technology},
   6637         type = {Doctoral},
   6638         address = {Stockholm},
   6639         abstract = {This dissertation presents algorithms for data structures called distributed hash tables (DHT) or structured overlay networks, which are used to build scalable self-managing distributed systems. The provided algorithms guarantee lookup consistency in the presence of dynamism: they guarantee consistent lookup results in the presence of nodes joining and leaving. Similarly, the algorithms guarantee that routing never fails while nodes join and leave. Previous algorithms for lookup consistency either suffer from starvation, do not work in the presence of failures, or lack proof of correctness.
   6640 
   6641 Several group communication algorithms for structured overlay networks are presented. We provide an overlay broadcast algorithm, which unlike previous algorithms avoids redundant messages, reaching all nodes in O(log n) time, while using O(n) messages, where n is the number of nodes in the system. The broadcast algorithm is used to build overlay multicast.
   6642 
   6643 
   6644  We introduce bulk operation, which enables a node to efficiently make multiple lookups or send a message to all nodes in a specified set of identifiers. The algorithm ensures that all specified nodes are reached in O(log n) time, sending maximum O(log n) messages per node, regardless of the input size of the bulk operation. Moreover, the algorithm avoids sending redundant messages. Previous approaches required multiple lookups, which consume more messages and can render the initiator a bottleneck. Our algorithms are used in DHT-based storage systems, where nodes can do thousands of lookups to fetch large files. We use the bulk operation algorithm to construct a pseudo-reliable broadcast algorithm. Bulk operations can also be used to implement efficient range queries.
   6645 
   6646 
   6647  Finally, we describe a novel way to place replicas in a DHT, called symmetric replication, that enables parallel recursive lookups. Parallel lookups are known to reduce latencies. However, costly iterative lookups have previously been used to do parallel lookups. Moreover, joins or leaves only require exchanging O(1) messages, while other schemes require at least log(f) messages for a replication degree of f.
   6648 
   6649 The algorithms have been implemented in a middleware called the Distributed k-ary System (DKS), which is briefly described},
   6650         www_section = {distributed hash table, distributed k-ary system, DKS},
   6651         url = {http://eprints.sics.se/516/},
   6652         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ghodsi\%20-\%20Distributed\%20k-ary\%20System.pdf},
   6653         author = {Ali Ghodsi}
   6654 }
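% A small Python sketch of one reading of the symmetric replication scheme
% mentioned in the abstract: with replication degree f in an identifier space
% of size N (f dividing N), replicas of a key sit at identifiers shifted by
% multiples of N/f, so any node can compute all replica locations locally.
% Parameter names and the example sizes are assumptions for illustration.
%
%   def symmetric_replicas(key, space_size, degree):
%       assert space_size % degree == 0
%       step = space_size // degree
%       return [(key + i * step) % space_size for i in range(degree)]
%
%   # Example: in a 64-identifier space with degree 4, key 10 is replicated
%   # at identifiers 10, 26, 42 and 58.
%   print(symmetric_replicas(10, 64, 4))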
   6655 @conference {AhmedBoutaba2006DistributedPatternMatching,
   6656         title = {Distributed Pattern Matching: A Key to Flexible and Efficient P2P Search},
   6657         booktitle = {2006 IEEE/IFIP Network Operations and Management Symposium NOMS 2006},
   6658         year = {2006},
   6659         pages = {198--208},
   6660         publisher = {IEEE},
   6661         organization = {IEEE},
   6662         abstract = {Flexibility and efficiency are the prime requirements for any P2P search mechanism. Existing P2P systems do not seem to provide a satisfactory solution for achieving these two conflicting goals. Unstructured search protocols (as adopted in Gnutella and FastTrack) provide search flexibility but exhibit poor performance characteristics. Structured search techniques (mostly distributed hash table (DHT)-based), on the other hand, can efficiently route queries to target peers but support exact-match queries only. In this paper we present a novel P2P system, called distributed pattern matching system (DPMS), for enabling flexible and efficient search. Distributed pattern matching can be used to solve problems like wildcard searching (for file-sharing P2P systems), partial service description matching (for service discovery systems) etc. DPMS uses a hierarchy of indexing peers for disseminating advertised patterns. Patterns are aggregated and replicated at each level along the hierarchy. Replication improves availability and resilience to peer failure, and aggregation reduces storage overhead. An advertised pattern can be discovered using any subset of its 1-bits; this allows inexact matching and queries in conjunctive normal form. Search complexity (i.e., the number of peers to be probed) in DPMS is O(log N + zeta log N/log N), where N is the total number of peers and zeta is proportional to the number of matches required in a search result. The impact of the churn problem is less severe in DPMS than in DHT-based systems. Moreover, DPMS provides a guarantee on search completeness for moderately stable networks. We demonstrate the effectiveness of DPMS using mathematical analysis and simulation results},
   6663         www_section = {matching, P2P, search},
   6664         isbn = {1-4244-0142-9},
   6665         doi = {10.1109/NOMS.2006.1687551},
   6666         url = {http://dx.doi.org/10.1109/NOMS.2006.1687551},
   6667         author = {Ahmed, R. and Boutaba, R.}
   6668 }
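% A tiny Python illustration of the matching rule stated in the abstract: an
% advertised pattern is discovered by any query whose 1-bits are a subset of
% the advertisement's 1-bits. Bit widths and values are illustrative only.
%
%   def matches(query_bits, advertised_bits):
%       # Every 1-bit of the query must also be set in the advertisement.
%       return query_bits & advertised_bits == query_bits
%
%   print(matches(0b001010, 0b101110))  # True: the query bits are a subset
%   print(matches(0b010001, 0b101110))  # False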
   6669 @conference {sandberg06distrouting,
   6670         title = {Distributed Routing in Small-World Networks},
   6671         booktitle = {Algorithm Engineering and Experiments},
   6672         year = {2006},
   6673         publisher = {SIAM},
   6674         organization = {SIAM},
   6675         abstract = {Theoretical basis for the routing protocol of Freenet 0.7 },
   6676         www_section = {small-world},
   6677         url = {http://www.math.chalmers.se/~ossa/wrt.html},
   6678         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swroute.pdf},
   6679         author = {Sandberg, Oskar}
   6680 }
   6681 @book {2006_10,
   6682         title = {DNS-Based Service Discovery in Ad Hoc Networks: Evaluation and Improvements},
   6683         volume = {4217},
   6684         year = {2006},
   6685         publisher = {Springer Berlin / Heidelberg},
   6686         organization = {Springer Berlin / Heidelberg},
   6687         abstract = {In wireless networks, devices must be able to dynamically discover and share services in the environment. The problem of service discovery has attracted great research interest in recent years, particularly for ad hoc networks. Recently, the IETF has proposed the use of the DNS protocol for service discovery. For ad hoc networks, the IETF is working on two proposals for distributed DNS, Multicast DNS and LLMNR, both of which can be used for service discovery. In this paper we describe and compare through simulation the performance of service discovery based on these two proposals of distributed DNS. We also propose four simple improvements that reduce the traffic generated, and so the power consumption, especially of the most limited, battery-powered, devices. We present simulation results that show the impact of our improvements in a typical scenario},
   6688         www_section = {ad-hoc networks, DNS},
   6689         isbn = {978-3-540-45174-7},
   6690         doi = {10.1007/11872153},
   6691         url = {http://www.springerlink.com/content/m8322m1006416270/},
   6692         author = {Celeste Campo and Carlos Garc{\'\i}a-Rubio}
   6693 }
   6694 @conference {Md06e.:anonymous,
   6695         title = {Anonymous Secure Communication in Wireless Mobile Ad-hoc Networks},
   6696         booktitle = {Proceedings of the First International Conference on Ubiquitous Convergence Technology},
   6697         year = {2006},
   6698         pages = {131--140},
   6699         publisher = {Springer},
   6700         organization = {Springer},
   6701         abstract = {The main characteristic of a mobile ad-hoc network is its infrastructure-less, highly dynamic topology, which is subject to malicious traffic analysis. Malicious intermediate nodes in wireless mobile ad-hoc networks are a threat concerning security as well as anonymity of exchanged information. To protect anonymity and achieve security of nodes in mobile ad-hoc networks, an anonymous on-demand routing protocol, termed RIOMO, is proposed. For this purpose, pseudo IDs of the nodes are generated using pairing-based cryptography. Nodes can generate their own pseudo IDs independently. As a result, RIOMO reduces pseudo-ID maintenance costs. Only trustworthy nodes are allowed to take part in routing to discover a route. To ensure trustworthiness, each node has to authenticate itself to its neighbors through an anonymous authentication process. Thus RIOMO enables nodes to communicate safely without disclosing their identities; it also provides desirable anonymity properties such as identity privacy, location privacy, route anonymity, and robustness against several attacks},
   6702         www_section = {ad-hoc networks, anonymity, routing},
   6703         doi = {10.1007/978-3-540-71789-8},
   6704         url = {http://www.springerlink.com/content/g6334148068w1254/},
   6705         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.74.1585.pdf},
   6706         author = {Sk. Md. Mizanur Rahman and Atsuo Inomata and Takeshi Okamoto and Masahiro Mambo}
   6707 }
   6708 @conference {danezis:weis2006,
   6709         title = {The Economics of Mass Surveillance and the Questionable Value of Anonymous Communications},
   6710         booktitle = {Proceedings of the Fifth Workshop on the Economics of Information Security (WEIS 2006)},
   6711         year = {2006},
   6712         month = {June},
   6713         address = {Cambridge, UK},
   6714         abstract = {We present a model of surveillance based on social network theory, where observing one participant also leaks some information about third parties. We examine how many nodes an adversary has to observe in order to extract information about the network, but also how the method for choosing these nodes (target selection) greatly influences the resulting intelligence. Our results provide important insights into the actual security of anonymous communication, and their ability to minimise surveillance and disruption in a social network. They also allow us to draw interesting policy conclusions from published interception figures, and get a better estimate of the amount of privacy invasion and the actual volume of surveillance taking place},
   6715         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.9384},
   6716         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-weis2006.pdf},
   6717         author = {George Danezis and Bettina Wittneben},
   6718         editor = {Ross Anderson}
   6719 }
   6720 @article {1151692,
   6721         title = {Energy-aware lossless data compression},
   6722         journal = {ACM Trans. Comput. Syst},
   6723         volume = {24},
   6724         number = {3},
   6725         year = {2006},
   6726         month = jan,
   6727         pages = {250--291},
   6728         publisher = {ACM},
   6729         address = {New York, NY, USA},
   6730         abstract = {Wireless transmission of a single bit can require over 1000 times more energy than a single computation. It can therefore be beneficial to perform additional computation to reduce the number of bits transmitted. If the energy required to compress data is less than the energy required to send it, there is a net energy savings and an increase in battery life for portable computers. This article presents a study of the energy savings possible by losslessly compressing data prior to transmission. A variety of algorithms were measured on a StrongARM SA-110 processor. This work demonstrates that, with several typical compression algorithms, there is actually a net energy increase when compression is applied before transmission. Reasons for this increase are explained and suggestions are made to avoid it. One such energy-aware suggestion is asymmetric compression, the use of one compression algorithm on the transmit side and a different algorithm for the receive path. By choosing the lowest-energy compressor and decompressor on the test platform, overall energy to send and receive data can be reduced by 11\% compared with a well-chosen symmetric pair, or up to 57\% over the default symmetric zlib scheme},
   6731         www_section = {compression, energy-aware, lossless},
   6732         issn = {0734-2071},
   6733         doi = {10.1145/1151690.1151692},
   6734         url = {http://portal.acm.org/citation.cfm?id=1151692$\#$},
   6735         author = {Kenneth Barr and Asanovi{\'c}, Krste}
   6736 }
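% An illustrative Python sketch (not from the paper) of the basic trade-off
% discussed here: compress a payload before transmission only when it
% actually shrinks, since wasted CPU work costs energy without saving any
% transmission energy. zlib level 1 stands in for a cheap compressor.
%
%   import zlib
%
%   def prepare_for_transmission(payload, level=1):
%       """Return (compressed_flag, data); keep the original if smaller."""
%       candidate = zlib.compress(payload, level)
%       if len(candidate) < len(payload):
%           return True, candidate
%       return False, payload
%
%   def on_receive(compressed_flag, data):
%       return zlib.decompress(data) if compressed_flag else data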
   6737 @conference {1143660,
   6738         title = {Estimation based erasure-coding routing in delay tolerant networks},
   6739         booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless communications and mobile computing},
   6740         year = {2006},
   6741         pages = {557--562},
   6742         publisher = {ACM},
   6743         organization = {ACM},
   6744         address = {New York, NY, USA},
   6745         abstract = {Wireless Delay Tolerant Networks (DTNs) are intermittently connected mobile wireless networks. Some well-known assumptions of traditional networks are no longer true in DTNs, which makes routing in DTNs a challenging problem. We observe that mobile nodes in realistic wireless DTNs may always have some mobility pattern information which can be used to estimate one node's ability to deliver a specific message. This estimation can greatly enhance the routing performance in DTNs. Furthermore, we adopt an alternative way to generate redundancy using erasure coding. With a fixed overhead, the erasure coding can generate a large number of message-blocks instead of a few replications, and therefore it allows the transmission of only a portion of message to a relay. This can greatly increase the routing diversity when combined with estimation-based approaches. We have conducted extensive simulations to evaluate the performance of our scheme. The results demonstrate that our scheme outperforms previously proposed schemes},
   6746         www_section = {delay tolerant network},
   6747         isbn = {1-59593-306-9},
   6748         doi = {10.1145/1143549.1143660},
   6749         url = {http://portal.acm.org/citation.cfm?id=1143549.1143660$\#$},
   6750         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.249.pdf},
   6751         author = {Liao, Yong and Tan, Kun and Zhang, Zhensheng and Gao, Lixin}
   6752 }
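% A toy Python stand-in (not the paper's erasure code) for the idea of adding
% coded redundancy before handing message blocks to relays: k data blocks
% plus one XOR parity block, from which any single lost block can be rebuilt.
% Real schemes use Reed-Solomon or similar codes; names are illustrative.
%
%   def xor_blocks(blocks):
%       out = bytearray(len(blocks[0]))
%       for block in blocks:
%           for i, b in enumerate(block):
%               out[i] ^= b
%       return bytes(out)
%
%   def encode(data_blocks):
%       return list(data_blocks) + [xor_blocks(data_blocks)]
%
%   def repair(received):
%       """received: the encoded blocks with exactly one entry set to None."""
%       missing = received.index(None)
%       repaired = list(received)
%       repaired[missing] = xor_blocks([b for b in received if b is not None])
%       return repaired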
   6753 @article {1217950,
   6754         title = {Experiences in building and operating ePOST, a reliable peer-to-peer application},
   6755         journal = {SIGOPS Oper. Syst. Rev},
   6756         volume = {40},
   6757         number = {4},
   6758         year = {2006},
   6759         pages = {147--159},
   6760         publisher = {ACM},
   6761         address = {New York, NY, USA},
   6762         abstract = {Peer-to-peer (p2p) technology can potentially be used to build highly reliable applications without a single point of failure. However, most of the existing applications, such as file sharing or web caching, have only moderate reliability demands. Without a challenging proving ground, it remains unclear whether the full potential of p2p systems can be realized. To provide such a proving ground, we have designed, deployed and operated a p2p-based email system. We chose email because users depend on it for their daily work and therefore place high demands on the availability and reliability of the service, as well as the durability, integrity, authenticity and privacy of their email. Our system, ePOST, has been actively used by a small group of participants for over two years. In this paper, we report the problems and pitfalls we encountered in this process. We were able to address some of them by applying known principles of system design, while others turned out to be novel and fundamental, requiring us to devise new solutions. Our findings can be used to guide the design of future reliable p2p systems and provide interesting new directions for future research},
   6763         www_section = {P2P},
   6764         issn = {0163-5980},
   6765         doi = {10.1145/1218063.1217950},
   6766         url = {http://portal.acm.org/citation.cfm?id=1218063.1217950$\#$},
   6767         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/epost-eurosys2006.pdf},
   6768         author = {Mislove, Alan and Post, Ansley and Haeberlen, Andreas and Peter Druschel}
   6769 }
   6770 @conference {guha6ess,
   6771         title = {An Experimental Study of the Skype Peer-to-Peer VoIP System},
   6772         booktitle = {IPTPS'06--Proceedings of The 5th International Workshop on Peer-to-Peer Systems },
   6773         year = {2006},
   6774         month = feb,
   6775         pages = {1--6},
   6776         address = {Santa Barbara, CA, USA},
   6777         abstract = {Despite its popularity, relatively little is known about the traffic characteristics of the Skype VoIP system and how they differ from other P2P systems. We describe an experimental study of Skype VoIP traffic conducted over a one month period, where over 30 million datapoints were collected regarding the population of online clients, the number of supernodes, and their traffic characteristics. The results indicate that although the structure of the Skype system appears to be similar to other P2P systems, particularly KaZaA, there are several significant differences in traffic. The number of active clients shows diurnal and work-week behavior, correlating with normal working hours regardless of geography. The population of supernodes in the system tends to be relatively stable; thus node churn, a significant concern in other systems, seems less problematic in Skype. The typical bandwidth load on a supernode is relatively low, even if the supernode is relaying VoIP traffic. The paper aims to aid further understanding of a significant, successful P2P VoIP system, as well as provide experimental data that may be useful for design and modeling of such systems. These results also imply that the nature of a VoIP P2P system like Skype differs fundamentally from earlier P2P systems that are oriented toward file-sharing, and music and video download applications, and deserves more attention from the research community},
   6778         www_section = {decentralized, indexing, overlay, P2P, skype, unstructured},
   6779         url = {http://saikat.guha.cc/pub/iptps06-skype/},
   6780         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2706\%20-\%20Skype\%20p2p\%20VoIP\%20System.pdf},
   6781         author = {Saikat Guha and Daswani, Neil and Jain, Ravi}
   6782 }
   6783 @conference {Saito:2006:FTI:1130897.1131000_0,
   6784         title = {Fair Trading of Information: A Proposal for the Economics of Peer-to-Peer Systems},
   6785         booktitle = {ARES'06. Proceedings of the First International Conference on Availability, Reliability and Security},
   6786         year = {2006},
   6787         month = apr,
   6788         pages = {764--771},
   6789         publisher = {IEEE Computer Society},
   6790         organization = {IEEE Computer Society},
   6791         address = {Vienna, Austria},
   6792         abstract = {A P2P currency can be a powerful tool for promoting exchanges in a trusted way that make use of under-utilized resources both in computer networks and in real life. There are three classes of resource that can be exchanged in a P2P system: atoms (ex. physical goods by way of auctions), bits (ex. data files) and presences (ex. time slots for computing resources such as CPU, storage or bandwidth). If these are equally treated as commodities, however, the economy of the system is likely to collapse, because data files can be reproduced at a negligibly small cost whereas time slots for computing resources cannot even be stockpiled for future use. This paper clarifies this point by simulating a small world of traders, and proposes a novel way for applying the "reduction over time" feature[14] of i-WAT[11], a P2P currency. In the proposed new economic order (NEO), bits are freely shared among participants, whereas their producers are supported by peers, being given freedom to issue exchange tickets whose values are reduced over time},
   6793         www_section = {economics, information trading},
   6794         isbn = {0-7695-2567-9},
   6795         doi = {10.1109/ARES.2006.62},
   6796         url = {http://dl.acm.org/citation.cfm?id=1130897.1131000},
   6797         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ARES\%2706\%20-\%20Fair\%20Trading\%20of\%20Information.pdf},
   6798         author = {Saito, Kenji and Morino, Eiichi and Murai, Jun}
   6799 }
   6816 @article {1217937,
   6817         title = {Fireflies: scalable support for intrusion-tolerant network overlays},
   6818         journal = {SIGOPS Oper. Syst. Rev},
   6819         volume = {40},
   6820         number = {4},
   6821         year = {2006},
   6822         pages = {3--13},
   6823         publisher = {ACM},
   6824         address = {New York, NY, USA},
   6825         abstract = {This paper describes and evaluates Fireflies, a scalable protocol for supporting intrusion-tolerant network overlays. While such a protocol cannot distinguish Byzantine nodes from correct nodes in general, Fireflies provides correct nodes with a reasonably current view of which nodes are live, as well as a pseudo-random mesh for communication. The amount of data sent by correct nodes grows linearly with the aggregate rate of failures and recoveries, even if provoked by Byzantine nodes. The set of correct nodes form a connected submesh; correct nodes cannot be eclipsed by Byzantine nodes. Fireflies is deployed and evaluated on PlanetLab},
   6826         issn = {0163-5980},
   6827         doi = {10.1145/1218063.1217937},
   6828         url = {http://portal.acm.org/citation.cfm?id=1218063.1217937$\#$},
   6829         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fireflies.pdf},
   6830         author = {H{\r a}vard Johansen and Allavena, Andr{\'e} and Robbert Van Renesse}
   6831 }
   6832 @conference {Locher06freeriding,
   6833         title = {Free Riding in BitTorrent is Cheap},
   6834         booktitle = {HotNets},
   6835         year = {2006},
   6836         abstract = {While it is well-known that BitTorrent is vulnerable to selfish behavior, this paper demonstrates that even entire files can be downloaded without reciprocating at all in BitTorrent. To this end, we present BitThief, a free riding client that never contributes any real data. First, we show that simple tricks suffice in order to achieve high download rates, even in the absence of seeders. We also illustrate how peers in a swarm react to various sophisticated attacks. Moreover, our analysis reveals that sharing communities{\textemdash}communities originally intended to offer downloads of good quality and to promote cooperation among peers{\textemdash}provide many incentives to cheat},
   6837         www_section = {BitTorrent},
   6838         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.9307},
   6839         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.67.9307.pdf},
   6840         author = {Thomas Locher and Patrick Moor and Stefan Schmid and Roger Wattenhofer}
   6841 }
   6842 @conference {Grolimund06havelaar:a,
   6843         title = {Havelaar: A Robust and Efficient Reputation System for Active Peer-to-Peer Systems},
   6844         booktitle = {NetEcon'06. 1st Workshop on the Economics of Networked Systems Ann Arbor},
   6845         year = {2006},
   6846         month = jun,
   6847         address = {Ann Arbor, Michigan},
   6848         abstract = {Peer-to-peer (p2p) systems have the potential to harness huge amounts of resources. Unfortunately, however, it has been shown that most of today's p2p networks suffer from a large fraction of free-riders, which mostly consume resources without contributing much to the system themselves. This results in an overall performance degradation. One particularly interesting resource is bandwidth. Thereby, a service differentiation approach seems appropriate, where peers contributing higher upload bandwidth are rewarded with higher download bandwidth in return. Keeping track of the contribution of each peer in an open, decentralized environment, however, is not trivial; many systems which have been proposed are susceptible to false reports. Besides being prone to attacks, some solutions have a large communication and computation overhead, which can even be linear in the number of transactions{\textemdash}an unacceptable burden in practical and active systems. In this paper, we propose a reputation system which overcomes this scaling problem. Our analytical and simulation results are promising, indicating that the mechanism is accurate and efficient, especially when applied to systems where there are lots of transactions (e.g., due to erasure coding)},
   6849         www_section = {free-riding, havelaar, P2P, peer-to-peer networking, performance degradation, reputation system},
   6850         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2706\%20-\%20Harvelaar.pdf},
   6851         author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}
   6852 }
   6853 @conference {HotOrNot,
   6854         title = {Hot or Not: Revealing Hidden Services by their Clock Skew},
   6855         booktitle = {Proceedings of CCS 2006},
   6856         year = {2006},
   6857         month = {October},
   6858         publisher = {ACM  New York, NY, USA},
   6859         organization = {ACM  New York, NY, USA},
   6860         abstract = {Location-hidden services, as offered by anonymity systems such as Tor, allow servers to be operated under a pseudonym. As Tor is an overlay network, servers hosting hidden services are accessible both directly and over the anonymous channel. Traffic patterns through one channel have observable effects on the other, thus allowing a service's pseudonymous identity and IP address to be linked. One proposed solution to this vulnerability is for Tor nodes to provide fixed quality of service to each connection, regardless of other traffic, thus reducing capacity but resisting such interference attacks. However, even if each connection does not influence the others, total throughput would still affect the load on the CPU, and thus its heat output. Unfortunately for anonymity, the effect of temperature on clock skew can be remotely detected through observing timestamps. This attack works because existing abstract models of anonymity-network nodes do not take into account the inevitable imperfections of the hardware they run on. Furthermore, we suggest the same technique could be exploited as a classical covert channel and can even provide geolocation},
   6861         www_section = {anonymity, clock skew, covert channels, fingerprinting, Tor},
   6862         isbn = {1-59593-518-5},
   6863         doi = {10.1145/1180405.1180410},
   6864         url = {http://portal.acm.org/citation.cfm?id=1180410},
   6865         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotOrNot.pdf},
   6866         author = {Steven J. Murdoch}
   6867 }
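% A small Python sketch of the measurement underlying this attack (a standard
% technique, not code from the paper): estimate a remote clock's skew by a
% least-squares fit of (remote_timestamp - local_time) against local_time;
% the fitted slope is the skew, which shifts as the remote CPU heats up.
% The sample format and function name are assumptions for illustration.
%
%   def estimate_skew(samples):
%       """samples: list of (local_time, remote_timestamp) pairs in seconds."""
%       xs = [local for local, _ in samples]
%       ys = [remote - local for local, remote in samples]
%       n = len(samples)
%       mean_x = sum(xs) / n
%       mean_y = sum(ys) / n
%       cov = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
%       var = sum((x - mean_x) ** 2 for x in xs)
%       return cov / var  # e.g. 2e-05 means the clock gains 20 microseconds/second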
   6868 @conference {clonewars,
   6869         title = {How to win the clonewars: efficient periodic n-times anonymous authentication},
   6870         booktitle = {Proceedings of the 13th ACM conference on Computer and communications security (CCS 2006)},
   6871         year = {2006},
   6872         pages = {201--210},
   6873         publisher = {ACM Press},
   6874         organization = {ACM Press},
   6875         address = {New York, NY, USA},
   6876         abstract = {We create a credential system that lets a user anonymously authenticate at most $n$ times in a single time period. A user withdraws a dispenser of n e-tokens. She shows an e-token to a verifier to authenticate herself; each e-token can be used only once, however, the dispenser automatically refreshes every time period. The only prior solution to this problem, due to Damg{\r a}rd et al. [29], uses protocols that are a factor of k slower for the user and verifier, where k is the security parameter. Damg{\r a}rd et al. also only support one authentication per time period, while we support n. Because our construction is based on e-cash, we can use existing techniques to identify a cheating user, trace all of her e-tokens, and revoke her dispensers. We also offer a new anonymity service: glitch protection for basically honest users who (occasionally) reuse e-tokens. The verifier can always recognize a reused e-token; however, we preserve the anonymity of users who do not reuse e-tokens too often},
   6877         www_section = {clone detection, credentials, n-anonymous authentication},
   6878         isbn = {1-59593-518-5},
   6879         doi = {10.1145/1180405.1180431},
   6880         url = {http://portal.acm.org/citation.cfm?id=1180431},
   6881         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clonewars.pdf},
   6882         author = {Jan Camenisch and Susan Hohenberger and Markulf Kohlweiss and Anna Lysyanskaya and Mira Meyerovich}
   6883 }
   6884 @conference {1157518,
   6885         title = {iDIBS: An Improved Distributed Backup System},
   6886         booktitle = {ICPADS '06: Proceedings of the 12th International Conference on Parallel and Distributed Systems},
   6887         year = {2006},
   6888         pages = {58--67},
   6889         publisher = {IEEE Computer Society},
   6890         organization = {IEEE Computer Society},
   6891         address = {Washington, DC, USA},
   6892         abstract = {iDIBS is a peer-to-peer backup system which optimizes the Distributed Internet Backup System (DIBS). iDIBS offers increased reliability by enhancing the robustness of existing packet transmission mechanism. Reed-Solomon erasure codes are replaced with Luby Transform codes to improve computation speed and scalability of large files. Lists of peers are automatically stored onto nodes to reduce recovery time. To realize these optimizations, an acceptable amount of data overhead and an increase in network utilization are imposed on the iDIBS system. Through a variety of experiments, we demonstrate that iDIBS significantly outperforms DIBS in the areas of data computational complexity, backup reliability, and overall performance},
   6893         www_section = {backup, P2P, reliability},
   6894         isbn = {0-7695-2612-8},
   6895         doi = {10.1109/ICPADS.2006.52},
   6896         url = {http://portal.acm.org/citation.cfm?id=1156431.1157518$\#$},
   6897         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.94.4826.pdf},
   6898         author = {Morcos, Faruck and Chantem, Thidapat and Little, Philip and Gasiba, Tiago and Thain, Douglas}
   6899 }
   6900 @conference {clayton:pet2006,
   6901         title = {Ignoring the Great Firewall of China},
   6902         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   6903         year = {2006},
   6904         month = {June},
   6905         pages = {20--35},
   6906         publisher = {Springer},
   6907         organization = {Springer},
   6908         address = {Cambridge, UK},
   6909         abstract = {The so-called {\textquotedblleft}Great Firewall of China{\textquotedblright} operates, in part, by inspecting TCP packets for keywords that are to be blocked. If the keyword is present, TCP reset packets (viz: with the RST flag set) are sent to both endpoints of the connection, which then close. However, because the original packets are passed through the firewall unscathed, if the endpoints completely ignore the firewall's resets, then the connection will proceed unhindered. Once one connection has been blocked, the firewall makes further easy-to-evade attempts to block further connections from the same machine. This latter behaviour can be leveraged into a denial-of-service attack on third-party machines},
   6910         isbn = {978-3-540-68790-0},
   6911         doi = {10.1007/11957454},
   6912         url = {http://www.springerlink.com/content/7224582654260k03/},
   6913         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clayton-pet2006.pdf},
   6914         author = {Richard Clayton and Steven J. Murdoch and Robert N. M. Watson},
   6915         editor = {George Danezis and Philippe Golle}
   6916 }
   6917 @conference {kutzner06igor,
   6918         title = {The IGOR File System for Efficient Data Distribution in the GRID},
   6919         booktitle = {Proceedings of the Cracow Grid Workshop CGW 2006},
   6920         year = {2006},
   6921         address = {Cracow, Poland},
   6922         abstract = {Many GRID applications such as drug discovery in the pharmaceutical industry or simulations in meteorology and generally in the earth sciences rely on large data bases. Historically, these data bases are flat files on the order of several hundred megabytes each. Today, sites often need to download dozens or hundreds of such files before they can start a simulation or analysis run, even if the respective application accesses only small fractions of the respective files. The IGOR file system (which has been developed within the EU FP6 SIMDAT project) addresses the need for an easy and efficient way to access large files across the Internet. IGOR-FS is especially suited for (potentially globally) distributed sites that read or modify only small portions of the files. IGOR-FS provides fine grained versioning and backup capabilities; and it is built on strong cryptography to protect confidential data both in the network and on the local sites' storage systems},
   6923         www_section = {file systems, GRID},
   6924         isbn = {83-915141-7-X},
   6925         affiliation = {University of Karlsruhe, Germany},
   6926         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.68.1091},
   6927         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner06igor.pdf},
   6928         author = {Kendy Kutzner and Thomas Fuhrmann}
   6929 }
   6930 @conference {conf/infocom/StutzbachR06,
   6931         title = {Improving Lookup Performance Over a Widely-Deployed DHT},
   6932         booktitle = {INFOCOM},
   6933         year = {2006},
   6934         publisher = {IEEE},
   6935         organization = {IEEE},
   6936         abstract = {During recent years, Distributed Hash Tables (DHTs) have been extensively studied through simulation and analysis. However, due to their limited deployment, it has not been possible to observe the behavior of a widely-deployed DHT in practice. Recently, the popular eMule file-sharing software incorporated a Kademlia-based DHT, called Kad, which currently has around one million simultaneous users. In this paper, we empirically study the performance of the key DHT operation, lookup, over Kad. First, we analytically derive the benefits of different ways to increase the richness of routing tables in Kademlia-based DHTs. Second, we empirically characterize two aspects of the accuracy of routing tables in Kad, namely completeness and freshness, and characterize their impact on Kad's lookup performance. Finally, we investigate how the efficiency and consistency of lookup in Kad can be improved by performing parallel lookup and maintaining multiple replicas, respectively. Our results pinpoint the best operating point for the degree of lookup parallelism and the degree of replication for Kad},
   6937         www_section = {distributed hash table, redundancy},
   6938         url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2006.html$\#$StutzbachR06},
   6939         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infocom06-kad.pdf},
   6940         author = {Stutzbach, Daniel and Rejaie, Reza}
   6941 }
   6942 @conference {Pai06improvingrobustness,
   6943         title = {Improving Robustness of Peer-to-Peer Streaming with Incentives},
   6944         booktitle = {NetEcon'06. 1st Workshop on the Economics of Networked Systems},
   6945         year = {2006},
   6946         month = jun,
   6947         publisher = {ACM},
   6948         organization = {ACM},
   6949         address = {Ann Arbor, Michigan, USA},
   6950         abstract = {In this paper we argue that a robust incentive mechanism is important in a real-world peer-to-peer streaming system to ensure that nodes contribute as much upload bandwidth as they can. We show that simple tit-for-tat mechanisms which work well in file-sharing systems like BitTorrent do not perform well given the additional delay and bandwidth constraints imposed by live streaming. We present preliminary experimental results for an incentive mechanism based on the Iterated Prisoner's Dilemma problem that allows all nodes to download with low packet loss when there is sufficient capacity in the system, but when the system is resource-starved, nodes that contribute upload bandwidth receive better service than those that do not. Moreover, our algorithm does not require nodes to rely on any information other than direct observations of their neighbors' behavior towards them},
   6951         www_section = {peer-to-peer streaming, tit-for-tat},
   6952         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2706\%20-\%20Improving\%20robustness\%20of\%20p2p\%20streaming.pdf},
   6953         author = {Vinay Pai and Alexander E. Mohr}
   6954 }
   6955 @conference {ciaccio:pet2006,
   6956         title = {Improving Sender Anonymity in a Structured Overlay with Imprecise Routing},
   6957         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   6958         year = {2006},
   6959         month = {June},
   6960         pages = {190--207},
   6961         publisher = {Springer},
   6962         organization = {Springer},
   6963         address = {Cambridge, UK},
   6964         abstract = {In the framework of peer to peer distributed systems, the problem of anonymity in structured overlay networks remains a quite elusive one. It is especially unclear how to evaluate and improve sender anonymity, that is, untraceability of the peers who issue messages to other participants in the overlay. In a structured overlay organized as a chordal ring, we have found that a technique originally developed for recipient anonymity also improves sender anonymity. The technique is based on the use of imprecise entries in the routing tables of each participating peer. Simulations show that the sender anonymity, as measured in terms of average size of anonymity set, decreases slightly if the peers use imprecise routing; yet, the anonymity takes a better distribution, with good anonymity levels becoming more likely at the expenses of very high and very low levels. A better quality of anonymity service is thus provided to participants},
   6965         www_section = {anonymity, P2P},
   6966         isbn = {978-3-540-68790-0},
   6967         doi = {10.1007/11957454},
   6968         url = {http://www.springerlink.com/content/v473127846n07255/},
   6969         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ciaccio-pet2006.pdf},
   6970         author = {Giuseppe Ciaccio},
   6971         editor = {George Danezis and Philippe Golle}
   6972 }
   6973 @conference {2006_11,
   6974         title = {Improving traffic locality in BitTorrent via biased neighbor selection},
   6975         booktitle = {Proceedings of the 26th IEEE International Conference on Distributed Computing Systems},
   6976         year = {2006},
   6977         month = jan,
   6978         pages = {0--66},
   6979         publisher = {IEEE Computer Society},
   6980         organization = {IEEE Computer Society},
   6981         address = {Lisboa, Portugal},
   6982         abstract = {Peer-to-peer (P2P) applications such as BitTorrent ignore traffic costs at ISPs and generate a large amount of cross-ISP traffic. As a result, ISPs often throttle BitTorrent traffic to control the cost. In this paper, we examine a new approach to enhance BitTorrent traffic locality, biased neighbor selection, in which a peer chooses the majority, but not all, of its neighbors from peers within the same ISP. Using simulations, we show that biased neighbor selection maintains the nearly optimal performance of BitTorrent in a variety of environments, and fundamentally reduces the cross-ISP traffic by eliminating the traffic's linear growth with the number of peers. Key to its performance is the rarest first piece replication algorithm used by BitTorrent clients. Compared with existing locality-enhancing approaches such as bandwidth limiting, gateway peers, and caching, biased neighbor selection requires no dedicated servers and scales to a large number of BitTorrent networks},
   6983         www_section = {BitTorrent, neighbor selection, peer-to-peer networking, performance, traffic locality},
   6984         isbn = {0-7695-2540-7},
   6985         issn = {1063-6927},
   6986         doi = {10.1109/ICDCS.2006.48},
   6987         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2706\%20-\%20Improving\%20traffic\%20locality\%20in\%20BitTorrent.pdf},
   6988         author = {Ruchir Bindal and Pei Cao and William Chan and Jan Medved and George Suwala and Tony Bates and Amy Zhang}
   6989 }
   6990 @conference {Feigenbaum:2006:IIR:1134707.1134722,
   6991         title = {Incentive-compatible interdomain routing},
   6992         booktitle = {EC'06. Proceedings of the 7th ACM Conference on Electronic Commerce},
   6993         series = {EC '06},
   6994         year = {2006},
   6995         month = jun,
   6996         pages = {130--139},
   6997         publisher = {ACM},
   6998         organization = {ACM},
   6999         address = {Ann Arbor, Michigan, USA},
   7000         abstract = {The routing of traffic between Internet domains, or Autonomous Systems (ASes), a task known as interdomain routing, is currently handled by the Border Gateway Protocol (BGP). Using BGP, autonomous systems can apply semantically rich routing policies to choose interdomain routes in a distributed fashion. This expressiveness in routing-policy choice supports domains' autonomy in network operations and in business decisions, but it comes at a price: The interaction of locally defined routing policies can lead to unexpected global anomalies, including route oscillations or overall protocol divergence. Networking researchers have addressed this problem by devising constraints on policies that guarantee BGP convergence without unduly limiting expressiveness and autonomy. In addition to taking this engineering or "protocol-design" approach, researchers have approached interdomain routing from an economic or "mechanism-design" point of view. It is known that lowest-cost-path (LCP) routing can be implemented in a truthful, BGP-compatible manner but that several other natural classes of routing policies cannot. In this paper, we present a natural class of interdomain-routing policies that is more realistic than LCP routing and admits incentive-compatible, BGP-compatible implementation. We also present several positive steps toward a general theory of incentive-compatible interdomain routing},
   7001         www_section = {border gateway protocol (BGP), distributed algorithmic mechanism design, interdomain routing},
   7002         isbn = {1-59593-236-4},
   7003         doi = {http://doi.acm.org/10.1145/1134707.1134722},
   7004         url = {http://doi.acm.org/10.1145/1134707.1134722},
   7005         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2706\%20-\%20Incentive-compatible\%20interdomain\%20routing.pdf},
   7006         author = {Feigenbaum, Joan and Ramachandran, Vijay and Schapira, Michael}
   7007 }
   7008 @article {DBLP:journals/corr/abs-cs-0611016,
   7009         title = {Increasing Data Resilience of Mobile Devices with a Collaborative Backup Service},
   7010         journal = {CoRR},
   7011         volume = {abs/cs/0611016},
   7012         year = {2006},
   7013         abstract = {Whoever has had his cell phone stolen knows how frustrating it is to be unable to get his contact list back. To avoid data loss when losing or destroying a mobile device like a PDA or a cell phone, data is usually backed-up to a fixed station. However, in the time between the last backup and the failure, important data can have been produced and then lost. To handle this issue, we propose a transparent collaborative backup system. Indeed, by saving data on other mobile devices between two connections to a global infrastructure, we can resist to such scenarios. In this paper, after a general description of such a system, we present a way to replicate data on mobile devices to attain a prerequired resilience for the backup},
   7014         url = {http://www.pubzone.org/dblp/journals/corr/abs-cs-0611016},
   7015         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0611016v1.pdf},
   7016         author = {Damien Martin-Guillerez and Michel Ban{\^a}tre and Paul Couderc}
   7017 }
   7018 @article {WrightMM06,
   7019         title = {On Inferring Application Protocol Behaviors in Encrypted Network Traffic},
   7020         journal = {Journal of Machine Learning Research},
   7021         volume = {7},
   7022         year = {2006},
   7023         pages = {2745--2769},
   7024         publisher = {MIT Press},
   7025         address = {Cambridge, MA, USA},
   7026         abstract = {Several fundamental security mechanisms for restricting access to network resources rely on the ability of a reference monitor to inspect the contents of traffic as it traverses the network. However, with the increasing popularity of cryptographic protocols, the traditional means of inspecting packet contents to enforce security policies is no longer a viable approach as message contents are concealed by encryption. In this paper, we investigate the extent to which common application protocols can be identified using only the features that remain intact after encryption---namely packet size, timing, and direction. We first present what we believe to be the first exploratory look at protocol identification in encrypted tunnels which carry traffic from many TCP connections simultaneously, using only post-encryption observable features. We then explore the problem of protocol identification in individual encrypted TCP connections, using much less data than in other recent approaches. The results of our evaluation show that our classifiers achieve accuracy greater than 90\% for several protocols in aggregate traffic, and, for most protocols, greater than 80\% when making fine-grained classifications on single connections. Moreover, perhaps most surprisingly, we show that one can even estimate the number of live connections in certain classes of encrypted tunnels to within, on average, better than 20\%},
   7027         www_section = {hidden Markov models, traffic classification},
   7028         issn = {1533-7928},
   7029         url = {http://portal.acm.org/citation.cfm?id=1248647},
   7030         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WrightMM06.pdf},
   7031         author = {Charles Wright and Fabian Monrose and Gerald M. Masson}
   7032 }
   7033 @conference {Liberatore:2006,
   7034         title = {Inferring the Source of Encrypted HTTP Connections},
   7035         booktitle = {Proceedings of the 13th ACM conference on Computer and Communications Security (CCS 2006)},
   7036         year = {2006},
   7037         month = {October},
   7038         pages = {255--263},
   7039         publisher = {ACM New York, NY, USA},
   7040         organization = {ACM New York, NY, USA},
   7041         abstract = {We examine the effectiveness of two traffic analysis techniques for identifying encrypted HTTP streams. The techniques are based upon classification algorithms, identifying encrypted traffic on the basis of similarities to features in a library of known profiles. We show that these profiles need not be collected immediately before the encrypted stream; these methods can be used to identify traffic observed both well before and well after the library is created. We give evidence that these techniques will exhibit the scalability necessary to be effective on the Internet. We examine several methods of actively countering the techniques, and we find that such countermeasures are effective, but at a significant increase in the size of the traffic stream. Our claims are substantiated by experiments and simulation on over 400,000 traffic streams we collected from 2,000 distinct web sites during a two month period},
   7042         www_section = {latency, network forensics, traffic analysis},
   7043         isbn = {1-59593-518-5},
   7044         doi = {10.1145/1180405.1180437},
   7045         url = {http://portal.acm.org/citation.cfm?id=1180437},
   7046         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Liberatore-2006.pdf},
   7047         author = {Marc Liberatore and Brian Neil Levine}
   7048 }
   7049 @phdthesis {Saito:2004:MTP:968884.969522,
   7050         title = {i-WAT: The Internet WAT System--An Architecture for Maintaining Trust and Facilitating Peer-to-Peer Barter Relationships},
   7051         volume = {Philosophy (Media and Governance)},
   7052         year = {2006},
   7053         month = jan,
   7054         pages = {0--231},
   7055         school = {Keio University},
   7056         address = {Fujisawa, Japan},
   7057   keywords = {i-WAT, OpenPGP, WAT system},
   7058         url = {http://www.sfc.wide.ad.jp/dissertation/ks91_e.html},
   7059         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Saito\%20-\%20i-WAT\%20Dissertation.pdf},
   7060         author = {Saito, Kenji}
   7061 }
   7062 @conference {2006_12,
   7063         title = {Less Hashing, Same Performance: Building a Better Bloom Filter},
   7064         booktitle = {Algorithms -- ESA 2006},
   7065         series = {Lecture Notes in Computer Science},
   7066         volume = {4168},
   7067         year = {2006},
   7068         pages = {456--467},
   7069         publisher = {Springer Berlin Heidelberg},
   7070         organization = {Springer Berlin Heidelberg},
   7071         abstract = {A standard technique from the hashing literature is to use two hash functions h1(x) and h2(x) to simulate additional hash functions of the form gi (x) = h1(x) + ih2(x). We demonstrate that this technique can be usefully applied to Bloom filters and related data structures. Specifically, only two hash functions are necessary to effectively implement a Bloom filter without any loss in the asymptotic false positive probability. This leads to less computation and potentially less need for randomness in practice},
   7072         isbn = {978-3-540-38875-3},
   7073         doi = {10.1007/11841036_42},
   7074         url = {http://dx.doi.org/10.1007/11841036_42},
   7075         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LessHashing2006Kirsch.pdf},
   7076         author = {Kirsch, Adam and Mitzenmacher, Michael},
   7077         editor = {Azar, Yossi and Erlebach, Thomas}
   7078 }
   7079 @conference {cview:pet2006,
   7080         title = {Linking Anonymous Transactions: The Consistent View Attack},
   7081         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   7082         year = {2006},
   7083         month = {June},
   7084         pages = {384--392},
   7085         publisher = {Springer},
   7086         organization = {Springer},
   7087         address = {Cambridge, UK},
   7088         abstract = {In this paper we study a particular attack that may be launched by cooperating organisations in order to link the transactions and the pseudonyms of the users of an anonymous credential system. The results of our analysis are both positive and negative. The good (resp. bad) news, from a privacy protection (resp. evidence gathering) viewpoint, is that the attack may be computationally intensive. In particular, it requires solving a problem that is polynomial time equivalent to ALLSAT . The bad (resp. good) news is that a typical instance of this problem may be efficiently solvable},
   7089         www_section = {privacy, pseudonym},
   7090         isbn = {978-3-540-68790-0},
   7091         doi = {10.1007/11957454},
   7092         url = {http://www.springerlink.com/content/y6l6412387663581/},
   7093         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cview-pet2006.pdf},
   7094         author = {Andreas Pashalidis and Bernd Meyer},
   7095         editor = {George Danezis and Philippe Golle}
   7096 }
   7097 @conference {di06linyphi,
   7098         title = {Linyphi: An IPv6-Compatible Implementation of SSR},
   7099         booktitle = {Proceedings of the Third International Workshop on Hot Topics in Peer-to-Peer Systems},
   7100         year = {2006},
   7101         type = {publication},
   7102         address = {Rhodes Island, Greece},
   7103         abstract = {Scalable source routing (SSR) is a self-organizing routing protocol designed for supporting peer-to-peer applications. It is especially suited for networks that do not have a well crafted structure, e.g. ad-hoc and mesh-networks. SSR is based on the combination of source routes and a virtual ring structure. This ring is used in a Chord-like manner to obtain source routes to destinations that are not yet in the respective router cache. This approach makes SSR more message efficient than flooding based ad-hoc routing protocols. Moreover, it directly provides the semantics of a structured routing overlay. In this paper we present Linyphi, an implementation of SSR for wireless access routers. Linyphi combines IPv6 and SSR so that unmodified IPv6 hosts have transparent connectivity to both the Linyphi mesh network and the IPv4/v6 Internet. We give a basic outline of the implementation and demonstrate its suitability in real-world mesh network scenarios. Linyphi is available for download (www.linyphi.net)},
   7104         www_section = {scalable source routing},
   7105         isbn = {1-4244-0054-6},
   7106         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   7107         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di06linyphi.pdf},
   7108         author = {Di, Pengfei and Massimiliano Marcon and Thomas Fuhrmann}
   7109 }
   7110 @conference {hs-attack06,
   7111         title = {Locating Hidden Servers},
   7112         booktitle = {Proceedings of the 2006 IEEE Symposium on Security and Privacy},
   7113         year = {2006},
   7114         month = {May},
   7115         publisher = {IEEE CS},
   7116         organization = {IEEE CS},
   7117         abstract = {Hidden services were deployed on the Tor anonymous communication network in 2004. Announced properties include server resistance to distributed DoS. Both the EFF and Reporters Without Borders have issued guides that describe using hidden services via Tor to protect the safety of dissidents as well as to resist censorship. We present fast and cheap attacks that reveal the location of a hidden server. Using a single hostile Tor node we have located deployed hidden servers in a matter of minutes. Although we examine hidden services over Tor, our results apply to any client using a variety of anonymity networks. In fact, these are the first actual intersection attacks on any deployed public network: thus confirming general expectations from prior theory and simulation. We recommend changes to route selection design and implementation for Tor. These changes require no operational increase in network overhead and are simple to make; but they prevent the attacks we have demonstrated. They have been implemented},
   7118         www_section = {anonymity measurement, Guard, Tor},
   7119         isbn = {0-7695-2574-1},
   7120         doi = {10.1109/SP.2006.24},
   7121         url = {http://portal.acm.org/citation.cfm?id=1130366},
   7122         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hs-attack06.pdf},
   7123         author = {Lasse {\O}verlier and Paul Syverson}
   7124 }
   7125 @conference {icdcs2006:m2,
   7126         title = {M2: Multicasting Mixes for Efficient and Anonymous Communication},
   7127         booktitle = {Proceedings of the 26th IEEE Conference on Distributed Computing Systems},
   7128         year = {2006},
   7129         month = {July},
   7130         abstract = {We present a technique to achieve anonymous multicasting in mix networks to deliver content from producers to consumers. Employing multicast allows content producers to send (and mixes to forward) information to multiple consumers without repeating work for each individual consumer. In our approach, consumers register interest for content by creating paths in the mix network to the content's producers. When possible, these paths are merged in the network so that paths destined for the same producer share a common path suffix to the producer. When a producer sends content, the content travels this common suffix toward its consumers (in the reverse direction) and "branches" into multiple messages when necessary. We detail the design of this technique and then analyze the unlinkability of our approach against a global, passive adversary who controls both the producer and some mixes. We show that there is a subtle degradation of unlinkability that arises from multicast. We discuss techniques to tune our design to mitigate this degradation while retaining the benefits of multicast},
   7131         www_section = {anonymous multicast},
   7132         isbn = {0-7695-2540-7},
   7133         doi = {10.1109/ICDCS.2006.53},
   7134         url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F10967\%2F34569\%2F01648846.pdf\%3Ftp\%3D\%26isnumber\%3D\%26arnumber\%3D1648846\&authDecision=-203},
   7135         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/icdcs2006-m2.pdf},
   7136         author = {Ginger Perng and Michael K. Reiter and Chenxi Wang}
   7137 }
   7138 @conference {ShWa-Relationship,
   7139         title = {Measuring Relationship Anonymity in Mix Networks},
   7140         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2006)},
   7141         year = {2006},
   7142         month = {October},
   7143         publisher = {ACM New York, NY, USA},
   7144         organization = {ACM New York, NY, USA},
   7145         abstract = {Many applications of mix networks such as anonymous Web browsing require relationship anonymity: it should be hard for the attacker to determine who is communicating with whom. Conventional methods for measuring anonymity, however, focus on sender anonymity instead. Sender anonymity guarantees that it is difficult for the attacker to determine the origin of any given message exiting the mix network, but this may not be sufficient to ensure relationship anonymity. Even if the attacker cannot identify the origin of messages arriving to some destination, relationship anonymity will fail if he can determine with high probability that at least one of the messages originated from a particular sender, without necessarily being able to recognize this message among others. We give a formal definition and a calculation methodology for relationship anonymity. Our techniques are similar to those used for sender anonymity, but, unlike sender anonymity, relationship anonymity is sensitive to the distribution of message destinations. In particular, Zipfian distributions with skew values characteristic of Web browsing provide especially poor relationship anonymity. Our methodology takes route selection algorithms into account, and incorporates information-theoretic metrics such as entropy and min-entropy. We illustrate our methodology by calculating relationship anonymity in several simulated mix networks},
   7146         www_section = {anonymity, privacy},
   7147         isbn = {1-59593-556-8},
   7148         doi = {10.1145/1179601.1179611},
   7149         url = {http://portal.acm.org/citation.cfm?id=1179611},
   7150         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ShWa-Relationship.pdf},
   7151         author = {Vitaly Shmatikov and Ming-Hsiu Wang}
   7152 }
   7153 @article {Godfrey:2006:MCD:1151659.1159931,
   7154         title = {Minimizing churn in distributed systems},
   7155         journal = {SIGCOMM Computer Communication Review},
   7156         volume = {36},
   7157         year = {2006},
   7158         month = aug,
   7159         pages = {147--158},
   7160         publisher = {ACM},
   7161         address = {New York, NY, USA},
   7162         abstract = {A pervasive requirement of distributed systems is to deal with churn: change in the set of participating nodes due to joins, graceful leaves, and failures. A high churn rate can increase costs or decrease service quality. This paper studies how to reduce churn by selecting which subset of a set of available nodes to use. First, we provide a comparison of the performance of a range of different node selection strategies in five real-world traces. Among our findings is that the simple strategy of picking a uniform-random replacement whenever a node fails performs surprisingly well. We explain its performance through analysis in a stochastic model. Second, we show that a class of strategies, which we call "Preference List" strategies, arise commonly as a result of optimizing for a metric other than churn, and produce high churn relative to more randomized strategies under realistic node failure patterns. Using this insight, we demonstrate and explain differences in performance for designs that incorporate varying degrees of randomization. We give examples from a variety of protocols, including anycast, overlay multicast, and distributed hash tables. In many cases, simply adding some randomization can go a long way towards reducing churn},
   7163         www_section = {churn, distributed hash table, multicast, node selection},
   7164         issn = {0146-4833},
   7165         doi = {http://doi.acm.org/10.1145/1151659.1159931},
   7166         url = {http://doi.acm.org/10.1145/1151659.1159931},
   7167         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comp.\%20Comm.\%20Rev.\%20-\%20Minimizing\%20churn\%20in\%20distributed\%20systems.pdf},
   7168         author = {Godfrey, Brighten and S Shenker and Ion Stoica}
   7169 }
   7170 @booklet {Stefansson06myriadstore,
   7171         title = {MyriadStore: A Peer-to-Peer Backup System},
   7172         year = {2006},
   7173         abstract = {Traditional backup methods are error prone, cumbersome and expensive. Distributed backup applications have emerged as promising tools able to avoid these disadvantages, by exploiting unused disk space of remote computers. In this paper we propose MyriadStore, a distributed peer-to-peer backup system. MyriadStore makes use of a trading scheme that ensures that a user has as much available storage space in the system as the one he/she contributes to it. A mechanism for making challenges between the system's nodes ensures that this restriction is fulfilled. Furthermore, MyriadStore minimizes bandwidth requirements and migration costs by treating separately the storage of the system's meta-data and the storage of the backed up data. This approach also offers great flexibility on the placement of the backed up data, a property that facilitates the deployment of the trading scheme},
   7174         www_section = {backup, P2P},
   7175         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.6985},
   7176         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.126.5915.pdf},
   7177         author = {Birgir Stefansson and Antonios Thodis and Ali Ghodsi and Seif Haridi}
   7178 }
   7179 @article {Fragouli:2006:NCI:1111322.1111337,
   7180         title = {Network Coding: an Instant Primer},
   7181         journal = {SIGCOMM Computer Communication Review},
   7182         volume = {36},
   7183         year = {2006},
   7184         month = jan,
   7185         pages = {63--68},
   7186         publisher = {ACM},
   7187         address = {New York, NY, USA},
   7188         abstract = {Network coding is a new research area that may have interesting applications in practical networking systems. With network coding, intermediate nodes may send out packets that are linear combinations of previously received information. There are two main benefits of this approach: potential throughput improvements and a high degree of robustness. Robustness translates into loss resilience and facilitates the design of simple distributed algorithms that perform well, even if decisions are based only on partial information. This paper is an instant primer on network coding: we explain what network coding does and how it does it. We also discuss the implications of theoretical results on network coding for realistic settings and show how network coding can be used in practice},
   7189         www_section = {network coding},
   7190         issn = {0146-4833},
   7191         doi = {http://doi.acm.org/10.1145/1111322.1111337},
   7192         url = {http://doi.acm.org/10.1145/1111322.1111337},
   7193         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev\%20-\%20Network\%20Coding\%3A\%20an\%20Instant\%20Primer.pdf},
   7194         author = {Fragouli, Christina and Jean-Yves Le Boudec and J{\"o}rg Widmer}
   7195 }
   7196 @conference {wpes06:heydt-benjamin,
   7197         title = {Nonesuch: a mix network with sender unobservability},
   7198         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2006)},
   7199         year = {2006},
   7200         pages = {1--8},
   7201         publisher = {ACM Press},
   7202         organization = {ACM Press},
   7203         address = {New York, NY, USA},
   7204         abstract = {Oblivious submission to anonymity systems is a process by which a message may be submitted in such a way that neither the anonymity network nor a global passive adversary may determine that a valid message has been sent. We present Nonesuch: a mix network with steganographic submission and probabilistic identification and attenuation of cover traffic. In our system messages are submitted as stegotext hidden inside Usenet postings. The steganographic extraction mechanism is such that the vast majority of the Usenet postings which do not contain keyed stegotext will produce meaningless output which serves as cover traffic, thus increasing the anonymity of the real messages. This cover traffic is subject to probabilistic attenuation in which nodes have only a small probability of distinguishing cover messages from "real" messages. This attenuation prevents cover traffic from travelling through the network in an infinite loop, while making it infeasible for an entrance node to distinguish senders},
   7205         www_section = {oblivious circuits, public key cryptography, steganography, unobservability},
   7206         isbn = {1-59593-556-8},
   7207         doi = {10.1145/1179601.1179603 },
   7208         url = {http://portal.acm.org/citation.cfm?id=1179601.1179603},
   7209         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes06-heydt-benjamin.pdf},
   7210         author = {Thomas S. Heydt-Benjamin and Andrei Serjantov and Benessa Defend}
   7211 }
   7212 @conference {Tati06onobject,
   7213         title = {On Object Maintenance in Peer-to-Peer Systems},
   7214         booktitle = {IPTPS'06--Proceedings of the 5th International Workshop on Peer-to-Peer Systems},
   7215         year = {2006},
   7216         month = feb,
   7217         address = {Santa Barbara, CA, USA},
   7218         abstract = {In this paper, we revisit object maintenance in peer-to-peer systems, focusing on how temporary and permanent churn impact the overheads associated with object maintenance. We have a number of goals: to highlight how different environments exhibit different degrees of temporary and permanent churn; to provide further insight into how churn in different environments affects the tuning of object maintenance strategies; and to examine how object maintenance and churn interact with other constraints such as storage capacity. When possible, we highlight behavior independent of particular object maintenance strategies. When an issue depends on a particular strategy, though, we explore it in the context of a strategy in essence similar to TotalRecall, which uses erasure coding, lazy repair of data blocks, and random indirect placement (we also assume that repairs incorporate remaining blocks rather than regenerating redundancy from scratch)},
   7219         www_section = {churn, P2P, peer-to-peer networking},
   7220         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2706\%20-\%20On\%20object\%20maintenance\%20in\%20p2p\%20systems.pdf},
   7221         author = {Kiran Tati and Geoffrey M. Voelker}
   7222 }
   7223 @conference {1128335,
   7224         title = {OmniStore: A system for ubiquitous personal storage management},
   7225         booktitle = {PERCOM '06: Proceedings of the Fourth Annual IEEE International Conference on Pervasive Computing and Communications},
   7226         year = {2006},
   7227         pages = {136--147},
   7228         publisher = {IEEE Computer Society},
   7229         organization = {IEEE Computer Society},
   7230         address = {Washington, DC, USA},
   7231         abstract = {As personal area networking becomes a reality, the collective management of storage in portable devices such as mobile phones, cameras and music players will grow in importance. The increasing wireless communication capability of such devices makes it possible for them to interact with each other and implement more advanced storage functionality. This paper introduces OmniStore, a system which employs a unified data management approach that integrates portable and backend storage, but also exhibits self-organizing behavior through spontaneous device collaboration},
   7232         isbn = {0-7695-2518-0},
   7233         doi = {10.1109/PERCOM.2006.40},
   7234         url = {http://portal.acm.org/citation.cfm?id=1128335},
   7235         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.96.4283.pdf},
   7236         author = {Alexandros Karypidis and Spyros Lalis}
   7237 }
   7238 @conference {Fitzi:2006:OEM:1146381.1146407,
   7239         title = {Optimally efficient multi-valued byzantine agreement},
   7240         booktitle = {Proceedings of the twenty-fifth annual ACM symposium on Principles of distributed computing},
   7241         series = {PODC '06},
   7242         year = {2006},
   7243         pages = {163--168},
   7244         publisher = {ACM},
   7245         organization = {ACM},
   7246         address = {New York, NY, USA},
   7247         www_section = {byzantine agreement, communication complexity, cryptographic security, information-theoretic security},
   7248         isbn = {1-59593-384-0},
   7249         doi = {10.1145/1146381.1146407},
   7250         url = {http://doi.acm.org/10.1145/1146381.1146407},
   7251         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FitHir06.pdf},
   7252         author = {Fitzi, Matthias and Hirt, Martin}
   7253 }
   7254 @conference {2006_13,
   7255         title = {Our Data, Ourselves: Privacy via Distributed Noise Generation},
   7256         booktitle = {Proceedings of the 24th Annual International Conference on The Theory and Applications of Cryptographic Techniques},
   7257         year = {2006},
   7258         publisher = {Springer-Verlag},
   7259         organization = {Springer-Verlag},
   7260         address = {Berlin, Heidelberg},
   7261         abstract = {In this work we provide efficient distributed protocols for generating shares of random noise, secure against malicious participants. The purpose of the noise generation is to create a distributed implementation of the privacy-preserving statistical databases described in recent papers [14, 4, 13]. In these databases, privacy is obtained by perturbing the true answer to a database query by the addition of a small amount of
   7262 Gaussian or exponentially distributed random noise. The computational power of even a simple form of these databases, when the query is just of the
   7263 form of a sum over all rows $i$ in the database of a function
   7264 $f$ applied to the data in row $i$, has been demonstrated in [4]. A distributed implementation eliminates the need for a trusted database administrator. The results for noise generation are of independent interest. The generation of Gaussian noise introduces a technique for distributing shares of many unbiased coins with fewer executions of verifiable secret sharing than would be needed using previous approaches (reduced by a factor of n). The generation of exponentially distributed noise uses
   7265 two shallow circuits: one for generating many arbitrarily but identically biased coins at an amortized cost of two unbiased random bits apiece, independent of the bias, and the other to combine bits of appropriate biases to obtain an exponential distribution},
   7266         isbn = {3-540-34546-9, 978-3-540-34546-6},
   7267         doi = {10.1007/11761679_29},
   7268         url = {http://dx.doi.org/10.1007/11761679_29},
   7269         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OurData2006Dwork.pdf},
   7270         author = {Dwork, Cynthia and Kenthapadi, Krishnaram and McSherry, Frank and Mironov, Ilya and Naor, Moni}
   7271 }
   7272 @booklet {Aad06packetcoding,
   7273         title = {Packet coding for strong anonymity in ad hoc networks},
   7274         year = {2006},
   7275         abstract = {Several techniques to improve anonymity have been proposed in the literature. They rely basically on multicast or on onion routing to thwart global attackers or local attackers respectively. None of the techniques provide a combined solution due to the incompatibility between the two components, as we show in this paper. We propose novel packet coding techniques that make the combination possible, thus integrating the advantages in a more complete and robust solution},
   7276         www_section = {anonymity, onion routing, robustness},
   7277         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.88.2407},
   7278         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2407_0.pdf},
   7279         author = {Imad Aad and Claude Castelluccia and Jean-Pierre Hubaux}
   7280 }
   7281 @article {Marx:2006:PGS:1140638.1140647,
   7282         title = {Parameterized graph separation problems},
   7283         journal = {Theoretical Computer Science},
   7284         volume = {351},
   7285         year = {2006},
   7286         month = feb,
   7287         pages = {394--406},
   7288         publisher = {Elsevier Science Publishers Ltd},
   7289         address = {Essex, UK},
   7290         abstract = {We consider parameterized problems where some separation property has to be achieved by deleting as few vertices as possible. The following five problems are studied: delete k vertices such that (a) each of the given l terminals is separated from the others, (b) each of the given l pairs of terminals is separated, (c) exactly l vertices are cut away from the graph, (d) exactly l connected vertices are cut away from the graph, (e) the graph is separated into at least l components. We show that if both k and l are parameters, then (a), (b) and (d) are fixed-parameter tractable, while (c) and (e) are W[1]-hard},
   7291         www_section = {multicasting, multiway cut, parameterized complexity, separator},
   7292         issn = {0304-3975},
   7293         doi = {10.1016/j.tcs.2005.10.007},
   7294         url = {http://dl.acm.org/citation.cfm?id=1140638.1140647},
   7295         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Marx\%20-\%20Parameterized\%20graph\%20separation\%20problems.pdf},
   7296         author = {Marx, D{\'a}niel}
   7297 }
   7298 @conference {Aekaterinidis2006PastryStrings,
   7299         title = {PastryStrings: A Comprehensive Content-Based Publish/Subscribe DHT Network},
   7300         booktitle = {Proceedings of the 26th IEEE International Conference on Distributed Computing Systems},
   7301         series = {ICDCS '06},
   7302         year = {2006},
   7303         pages = {0--23},
   7304         publisher = {IEEE Computer Society},
   7305         organization = {IEEE Computer Society},
   7306         address = {Washington, DC, USA},
   7307         isbn = {0-7695-2540-7},
   7308         doi = {10.1109/ICDCS.2006.63},
   7309         url = {http://dx.doi.org/10.1109/ICDCS.2006.63},
   7310         author = {Aekaterinidis, Ioannis and Triantafillou, Peter}
   7311 }
   7312 @conference {Massoulie:2006:PCS:1146381.1146402,
   7313         title = {Peer counting and sampling in overlay networks: random walk methods},
   7314         booktitle = {PODC '06--Proceedings of the 25th Annual ACM Symposium on Principles of Distributed Computing},
   7315         series = {PODC '06},
   7316         year = {2006},
   7317         month = jul,
   7318         pages = {123--132},
   7319         publisher = {ACM},
   7320         organization = {ACM},
   7321         address = {Denver, Colorado, USA},
   7322         abstract = {In this article we address the problem of counting the number of peers in a peer-to-peer system, and more generally of aggregating statistics of individual peers over the whole system. This functionality is useful in many applications, but hard to achieve when each node has only a limited, local knowledge of the whole system. We propose two generic techniques to solve this problem. The Random Tour method is based on the return time of a continuous time random walk to the node originating the query. The Sample and Collide method is based on counting the number of random samples gathered until a target number of redundant samples are obtained. It is inspired by the "birthday paradox" technique of [6], upon which it improves by achieving a target variance with fewer samples. The latter method relies on a sampling sub-routine which returns randomly chosen peers. Such a sampling algorithm is of independent interest. It can be used, for instance, for neighbour selection by new nodes joining the system. We use a continuous time random walk to obtain such samples. We analyse the complexity and accuracy of the two methods. We illustrate in particular how expansion properties of the overlay affect their performance},
   7323         www_section = {expander graphs, random walks, sampling},
   7324         isbn = {1-59593-384-0},
   7325         doi = {http://doi.acm.org/10.1145/1146381.1146402},
   7326         url = {http://doi.acm.org/10.1145/1146381.1146402},
   7327         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PODC\%2706\%20-\%20Peer\%20counting\%20and\%20sampling\%20in\%20overlay\%20networks.pdf},
   7328         author = {Massouli{\'e}, Laurent and Erwan Le Merrer and Anne-Marie Kermarrec and Ganesh, Ayalvadi}
   7329 }
   7330 @conference {2006_14,
   7331         title = {Peer to peer size estimation in large and dynamic networks: A comparative study},
   7332         booktitle = {HPDC'06--15th IEEE International Symposium on High Performance Distributed Computing},
   7333         year = {2006},
   7334         month = jun,
   7335         publisher = {IEEE Computer Society},
   7336         organization = {IEEE Computer Society},
   7337         address = {Paris, France},
   7338         abstract = {As the size of distributed systems keeps growing, the peer to peer communication paradigm has been identified as the key to scalability. Peer to peer overlay networks are characterized by their self-organizing capabilities, resilience to failure and fully decentralized control. In a peer to peer overlay, no entity has a global knowledge of the system. As much as this property is essential to ensure the scalability, monitoring the system under such circumstances is a complex task. Yet, estimating the size of the system is core functionality for many distributed applications for parameter setting or monitoring purposes. In this paper, we propose a comparative study between three algorithms that estimate in a fully decentralized way the size of a peer to peer overlay. Candidate approaches are generally applicable irrespective of the underlying structure of the peer to peer overlay. The paper reports the head to head comparison of system size estimation algorithms. The simulations have been conducted using the same simulation framework and inputs and highlight the differences in cost and accuracy of the estimation between the algorithms both in static and dynamic settings},
   7339         www_section = {comparison, counting, network size estimation, peer to peer},
   7340         isbn = {1-4244-0307-3},
   7341         doi = {http://dx.doi.org/10.1109/HPDC.2006.1652131},
   7342         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPDC\%2706\%20-\%20Peer\%20to\%20peer\%20size\%20estimation\%20in\%20large\%20and\%20dynamic\%20networks.pdf},
   7343         author = {Erwan Le Merrer and Anne-Marie Kermarrec and Massouli{\'e}, Laurent}
   7344 }
   7345 @conference {1161264,
   7346         title = {Performance evaluation of chord in mobile ad hoc networks},
   7347         booktitle = {MobiShare '06: Proceedings of the 1st international workshop on Decentralized resource sharing in mobile computing and networking},
   7348         year = {2006},
   7349         pages = {48--53},
   7350         publisher = {ACM},
   7351         organization = {ACM},
   7352         address = {New York, NY, USA},
   7353         abstract = {Mobile peer-to-peer applications recently have received growing interest. However, it is often assumed that structured peer-to-peer overlays cannot efficiently operate in mobile ad hoc networks (MANETs). The prevailing opinion is that this is due to the protocols' high overhead cost. In this paper, we show that this opinion is misguided. We present a thorough simulation study evaluating Chord in the well-known MANET simulator GloMoSim. We found the main issue of deploying Chord in a MANET not to be its overhead, but rather the protocol's pessimistic timeout and failover strategy. This strategy enables fast lookup resolution in spite of highly dynamic node membership, which is a significant problem in the Internet context. However, with the inherently higher packet loss rate in a MANET, this failover strategy results in lookups being inconsistently forwarded even if node membership does not change},
   7354         www_section = {Chord, mobile Ad-hoc networks},
   7355         isbn = {1-59593-558-4},
   7356         doi = {10.1145/1161252.1161264},
   7357         url = {http://portal.acm.org/citation.cfm?id=1161264},
   7358         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p48-cramer_ACM2006.pdf},
   7359         author = {Cramer, Curt and Thomas Fuhrmann}
   7360 }
   7361 @article {albrecht2006planetlab,
   7362         title = {PlanetLab application management using Plush},
   7363         journal = {ACM SIGOPS Operating Systems Review},
   7364         volume = {40},
   7365         number = {1},
   7366         year = {2006},
   7367         pages = {33--40},
   7368         publisher = {ACM},
   7369         www_section = {application management, PlanetLab, plush, resource allocation, resource discovery},
   7370         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/plush.pdf},
   7371         author = {Albrecht, J. and Tuttle, C. and Snoeren, A.C. and Vahdat, A.}
   7372 }
   7373 @conference {heydt-benjamin:pet2006,
   7374         title = {Privacy for Public Transportation},
   7375         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   7376         year = {2006},
   7377         month = {June},
   7378         pages = {1--19},
   7379         publisher = {Springer},
   7380         organization = {Springer},
   7381         address = {Cambridge, UK},
   7382         abstract = {We propose an application of recent advances in e-cash, anonymous credentials, and proxy re-encryption to the problem of privacy in public transit systems with electronic ticketing. We discuss some of the interesting features of transit ticketing as a problem domain, and provide an architecture sufficient for the needs of a typical metropolitan transit system. Our system maintains the security required by the transit authority and the user while significantly increasing passenger privacy. Our hybrid approach to ticketing allows use of passive RFID transponders as well as higher powered computing devices such as smartphones or PDAs. We demonstrate security and privacy features offered by our hybrid system that are unavailable in a homogeneous passive transponder architecture, and which are advantageous for users of passive as well as active devices},
   7383         www_section = {anonymity, privacy, re-encryption},
   7384         isbn = {978-3-540-68790-0},
   7385         doi = {10.1007/11957454},
   7386         url = {http://www.springerlink.com/content/c75053mr42n82wv5/},
   7387         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/heydt-benjamin-pet2006.pdf},
   7388         author = {Thomas S. Heydt-Benjamin and Hee-Jin Chae and Benessa Defend and Kevin Fu},
   7389         editor = {George Danezis and Philippe Golle}
   7390 }
   7391 @conference {2006_15,
   7392         title = {Privacy Preserving Nearest Neighbor Search},
   7393         booktitle = {Data Mining Workshops, 2006. ICDM Workshops 2006. Sixth IEEE International Conference on},
   7394         year = {2006},
   7395         month = dec,
   7396         abstract = {Data mining is frequently obstructed by privacy concerns. In many cases data is distributed, and bringing the data together in one place for analysis is not possible due to privacy laws (e.g. HIPAA) or policies. Privacy preserving data mining techniques have been developed to address this issue by providing mechanisms to mine the data while giving certain privacy guarantees. In this work we address the issue of privacy preserving nearest neighbor search, which forms the kernel of many data mining applications. To this end, we present a novel algorithm based on secure multiparty computation primitives to compute the nearest neighbors of records in horizontally distributed data. We show how this algorithm can be used in three important data mining algorithms, namely LOF outlier detection, SNN clustering, and kNN classification},
   7397         www_section = {Clustering algorithms, Computer science, Conferences, cryptography, Data mining, data privacy, distributed computing, Kernel, kNN classification, LOF outlier detection, Medical diagnostic imaging, multiparty computation primitives, nearest neighbor search, Nearest neighbor searches, pattern clustering, privacy preservation, SNN clustering},
   7398         doi = {10.1109/ICDMW.2006.133},
   7399         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2006Shaneck.pdf},
   7400         author = {Shaneck, M. and Yongdae Kim and Kumar, V.}
   7401 }
   7402 @conference { pianese:pulse,
   7403         title = {PULSE, a Flexible P2P Live Streaming System},
   7404         booktitle = {INFOCOM'06. Proceedings of the 25th IEEE International Conference on Computer Communications},
   7405         year = {2006},
   7406         month = apr,
   7407         pages = {-1--1},
   7408         publisher = {IEEE Computer Society},
   7409         organization = {IEEE Computer Society},
   7410         address = {Barcelona, Catalunya, Spain},
   7411         abstract = {With the widespread availability of inexpensive broadband Internet connections for home-users, a large number of bandwidth-intensive applications previously not feasible have now become practical. This is the case for multimedia live streaming, for which end-user's dial-up/ISDN modem connections once were the bottleneck. The bottleneck is now mostly found on the server side: the bandwidth required for serving many clients at once is large and thus very costly to the broadcasting entity. Peer-to-peer systems for on-demand and live streaming have proved to be an encouraging solution, since they can shift the burden of content distribution from the server to the users of the network. In this work we introduce PULSE, a P2P system for live streaming whose main goals are flexibility, scalability, and robustness. We present the fundamental concepts that stand behind the design of PULSE along with its intended global behavior, and describe in detail the main algorithms running on its nodes},
   7412         www_section = {peer-to-peer networking, pulse},
   7413         isbn = {1-4244-0221-2 },
   7414         doi = {http://dx.doi.org/10.1109/INFOCOM.2006.42},
   7415         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2706\%20-\%20Pianese\%2C\%20Keller\%20\%26\%20Biersack\%20-\%20PULSE.pdf},
   7416         author = {Fabio Pianese and Joaqu{\'\i}n Keller and E W Biersack}
   7417 }
   7418 @booklet {fuhrmann06pushing-tr,
   7419         title = {Pushing Chord into the Underlay: Scalable Routing for Hybrid MANETs},
   7420         number = {2006-12},
   7421         year = {2006},
   7422         publisher = {Fakult{\"a}t f{\"u}r Informatik, Universit{\"a}t Karlsruhe},
   7423         type = {Interner Bericht},
   7424         abstract = {SCALABLE SOURCE ROUTING is a novel routing approach for large unstructured networks, for example hybrid mobile ad hoc networks (MANETs), mesh networks, or sensor-actuator networks. It is especially suited for organically growing networks of many resource-limited mobile devices supported by a few fixed-wired nodes. SCALABLE SOURCE ROUTING is a full-fledged routing protocol that directly provides the semantics of a structured peer-to-peer overlay. Hence, it can serve as an efficient basis for fully decentralized applications on mobile devices. SCALABLE SOURCE ROUTING combines source routing in the physical network with Chord-like routing in the virtual ring formed by the address space. Message forwarding greedily decreases the distance in the virtual ring while preferring physically short paths. Unlike previous approaches, scalability is achieved without imposing artificial hierarchies or assigning location-dependent addresses. SCALABLE SOURCE ROUTING enables any-to-any communication in a flat address space without maintaining any-to-any routes. Each node proactively discovers its virtual vicinity using an iterative process. Additionally, it passively caches a limited amount of additional paths. By means of extensive simulation, we show that SCALABLE SOURCE ROUTING is resource-efficient and scalable well beyond 10,000 nodes},
   7425         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   7426         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann06pushing.pdf},
   7427         author = {Thomas Fuhrmann and Di, Pengfei and Kendy Kutzner and Cramer, Curt}
   7428 }
   7429 @conference {1109601,
   7430         title = {The rainbow skip graph: a fault-tolerant constant-degree distributed data structure},
   7431         booktitle = {SODA '06: Proceedings of the seventeenth annual ACM-SIAM symposium on Discrete algorithm},
   7432         year = {2006},
   7433         pages = {384--393},
   7434         publisher = {ACM},
   7435         organization = {ACM},
   7436         address = {New York, NY, USA},
        abstract = {We present a distributed data structure, which we call the rainbow skip graph. To our knowledge, this is the first peer-to-peer data structure that simultaneously achieves high fault-tolerance, constant-sized nodes, and fast update and query times for ordered data. It is a non-trivial adaptation of the SkipNet/skip-graph structures of Harvey et al. and Aspnes and Shah, so as to provide fault-tolerance as these structures do, but to do so using constant-sized nodes, as in the family tree structure of Zatloukal and Harvey. It supports successor queries on a set of n items using O(log n) messages with high probability, an improvement over the expected O(log n) messages of the family tree. Our structure achieves these results by using the following new constructs: {\textbullet} Rainbow connections: parallel sets of pointers between related components of nodes, so as to achieve good connectivity between "adjacent" components, using constant-sized nodes. {\textbullet} Hydra components: highly-connected, highly fault-tolerant components of constant-sized nodes, which will contain relatively large connected subcomponents even under the failure of a constant fraction of the nodes in the component. We further augment the hydra components in the rainbow skip graph by using erasure-resilient codes to ensure that any large subcomponent of nodes in a hydra component is sufficient to reconstruct all the data stored in that component. By carefully maintaining the size of related components and hydra components to be O(log n), we are able to achieve fast times for updates and queries in the rainbow skip graph. In addition, we show how to make the communication complexity for updates and queries be worst case, at the expense of more conceptual complexity and a slight degradation in the node congestion of the data structure},
   7438         www_section = {distributed hash table, Hydra, rainbow, RSG, skip graph, SkipNet},
   7439         isbn = {0-89871-605-5},
   7440         doi = {http://doi.acm.org/10.1145/1109557.1109601},
   7441         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rainbow.pdf},
   7442         author = {Goodrich, Michael T. and Nelson, Michael J. and Sun, Jonathan Z.}
   7443 }
   7444 @article {1148681,
   7445         title = {Raptor codes},
   7446         journal = {IEEE/ACM Trans. Netw},
   7447         volume = {14},
   7448         number = {SI},
   7449         year = {2006},
   7450         pages = {2551--2567},
   7451         publisher = {IEEE Press},
   7452         address = {Piscataway, NJ, USA},
   7453         abstract = {LT-codes are a new class of codes introduced by Luby for the purpose of scalable and fault-tolerant distribution of data over computer networks. In this paper, we introduce Raptor codes, an extension of LT-codes with linear time encoding and decoding. We will exhibit a class of universal Raptor codes: for a given integer k and any real {\epsilon} > 0, Raptor codes in this class produce a potentially infinite stream of symbols such that any subset of symbols of size k(1 + {\epsilon}) is sufficient to recover the original k symbols with high probability. Each output symbol is generated using O(log(1/ {\epsilon})) operations, and the original symbols are recovered from the collected ones with O(k log(1/{\epsilon})) operations.We will also introduce novel techniques for the analysis of the error probability of the decoder for finite length Raptor codes. Moreover, we will introduce and analyze systematic versions of Raptor codes, i.e., versions in which the first output elements of the coding system coincide with the original k elements},
   7454         www_section = {802.11, encoding, erasure coding},
   7455         issn = {1063-6692},
   7456         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raptor.pdf},
   7457         author = {M. Amin Shokrollahi}
   7458 }
   7459 @article {2006_16,
        title = {Reactive Clustering in MANETs},
        journal = {International Journal of Pervasive Computing and Communications},
   7462         volume = {2},
   7463         year = {2006},
   7464         pages = {81--90},
        abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been proposed in the literature. With only one exception so far (1), all these protocols are proactive, thus wasting bandwidth when their function is not currently needed. To reduce the signalling traffic load, reactive clustering may be employed. We have developed a clustering protocol named {\textquotedblleft}On-Demand Group Mobility-Based Clustering{\textquotedblright} (ODGMBC) (2), (3) which is reactive. Its goal is to build clusters as a basis for address autoconfiguration and hierarchical routing. In contrast to the protocol described in ref. (1), the design process especially addresses the notions of group mobility and of multi-hop clusters in a MANET. As a result, ODGMBC maps varying physical node groups onto logical clusters. In this paper, ODGMBC is described. It was implemented for the ad hoc network simulator GloMoSim (4) and evaluated using several performance indicators. Simulation results are promising and show that ODGMBC leads to stable clusters. This stability is advantageous for autoconfiguration and routing mechanisms to be employed in conjunction with the clustering algorithm},
   7466         www_section = {mobile Ad-hoc networks, multi-hop networks},
   7467         doi = {10.1108/17427370780000143},
   7468         publisher = {unknown},
   7469         url = {http://www.emeraldinsight.com/journals.htm?articleid=1615724\&show=pdf},
   7470         author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart}
   7471 }
   7472 @article {regroup2006,
   7473         title = {Regroup-And-Go mixes to counter the (n-1) attack},
   7474         journal = {Journal of Internet Research},
   7475         volume = {16},
   7476         number = {2},
   7477         year = {2006},
   7478         pages = {213--223},
   7479         publisher = {Emerald Group Publishing Limited},
   7480         type = {Journal},
        abstract = {The (n-1) attack is the most powerful attack against mixes, which are the basic building block of many modern anonymous systems. This paper aims to present a strategy that can be implemented in mix networks to detect and counter the active attacks, especially the (n-1) attack and its variants},
   7482         www_section = {anonymity, mix, privacy},
   7483         issn = {1066-2243 },
   7484         doi = {10.1108/10662240610656528},
   7485         url = {http://www.emeraldinsight.com/Insight/viewContentItem.do;jsessionid=6C3CF32A99DF3971C2144B461C8F2CF5?contentType=Article\&hdAction=lnkpdf\&contentId=1550662},
   7486         author = {Jin-Qiao Shi and Bin-Xing Fang and Li-Jie Shao}
   7487 }
   7488 @book {2006_17,
   7489         title = {Reputation Mechanisms},
   7490         booktitle = {Handbook on Information Systems and Economics},
   7491         year = {2006},
   7492         pages = {629--660},
   7493         publisher = {Elsevier},
   7494         organization = {Elsevier},
   7495         www_section = {online marketplace, reputation mechanism},
   7496         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dellarocas\%20-\%20Reputation\%20Mechanisms.pdf},
   7497         author = {Chrysanthos Dellarocas}
   7498 }
   7499 @booklet {Miller06robustcomposition:,
   7500         title = {Robust Composition: Towards a Unified Approach to Access Control and Concurrency Control},
   7501         year = {2006},
        abstract = {When separately written programs are composed so that they may cooperate, they may instead destructively interfere in unanticipated ways. These hazards limit the scale and functionality of the software systems we can successfully compose. This dissertation presents a framework for enabling those interactions between components needed for the cooperation we intend, while minimizing the hazards of destructive interference. Great progress on the composition problem has been made within the object paradigm, chiefly in the context of sequential, single-machine programming among benign components. We show how to extend this success to support robust composition of concurrent and potentially malicious components distributed over potentially malicious machines. We present E, a distributed, persistent, secure programming language, and CapDesk, a virus-safe desktop built in E, as embodiments of the techniques we explain},
   7503         www_section = {robustness},
   7504         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.4674},
   7505         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.101.4674.pdf},
   7506         author = {Mark Samuel Miller}
   7507 }
   7508 @conference {Salsa,
   7509         title = {Salsa: A Structured Approach to Large-Scale Anonymity},
   7510         booktitle = {Proceedings of CCS 2006},
   7511         year = {2006},
   7512         month = {October},
   7513         publisher = {ACM  New York, NY, USA},
   7514         organization = {ACM  New York, NY, USA},
   7515         abstract = {Highly distributed anonymous communications systems have the promise to reduce the effectiveness of certain attacks and improve scalability over more centralized approaches. Existing approaches, however, face security and scalability issues. Requiring nodes to have full knowledge of the other nodes in the system, as in Tor and Tarzan, limits scalability and can lead to intersection attacks in peer-to-peer configurations. MorphMix avoids this requirement for complete system knowledge, but users must rely on untrusted peers to select the path. This can lead to the attacker controlling the entire path more often than is acceptable.To overcome these problems, we propose Salsa, a structured approach to organizing highly distributed anonymous communications systems for scalability and security. Salsa is designed to select nodes to be used in anonymous circuits randomly from the full set of nodes, even though each node has knowledge of only a subset of the network. It uses a distributed hash table based on hashes of the nodes' IP addresses to organize the system. With a virtual tree structure, limited knowledge of other nodes is enough to route node lookups throughout the system. We use redundancy and bounds checking when performing lookups to prevent malicious nodes from returning false information without detection. We show that our scheme prevents attackers from biasing path selection, while incurring moderate overheads, as long as the fraction of malicious nodes is less than 20\%. Additionally, the system prevents attackers from obtaining a snapshot of the entire system until the number of attackers grows too large (e.g. 15\% for 10000 peers and 256 groups). The number of groups can be used as a tunable parameter in the system, depending on the number of peers, that can be used to balance performance and security},
   7516         www_section = {P2P, privacy},
   7517         isbn = {1-59593-518-5},
   7518         doi = {10.1145/1180405.1180409},
   7519         url = {http://portal.acm.org/citation.cfm?id=1180409},
   7520         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Salsa.pdf},
        author = {Arjun Nambiar and Matthew Wright}
   7522 }
   7523 @conference {2006_18,
   7524         title = {Scalable Routing in Sensor Actuator Networks with Churn},
   7525         booktitle = {Sensor and Ad Hoc Communications and Networks, 2006. SECON '06. 2006 3rd Annual IEEE Communications Society on },
   7526         year = {2006},
   7527         month = sep,
   7528         abstract = {Routing in wireless networks is inherently difficult since their network topologies are typically unstructured and unstable. Therefore, many routing protocols for ad-hoc networks and sensor networks revert to flooding to acquire routes to previously unknown destinations. However, such an approach does not scale to large networks, especially when nodes need to communicate with many different destinations. This paper advocates a novel approach, the scalable source routing (SSR) protocol. It combines overlay-like routing in a virtual network structure with source routing in the physical network structure. As a consequence, SSR can efficiently provide the routing semantics of a structured routing overlay, making it an efficient basis for the scalable implementation of fully decentralized applications. In T. Fuhrmann (2005) it has been demonstrated that SSR can almost entirely avoid flooding, thus leading to a both memory and message efficient routing mechanism for large unstructured networks. This paper extends SSR to unstable networks, i. e. networks with churn where nodes frequently join and leave, the latter potentially ungracefully},
   7529         www_section = {ad-hoc networks, scalable source routing},
   7530         author = {unknown},
   7531         isbn = {1-4244-0626-9 },
   7532         doi = {10.1109/SAHCN.2006.288406 },
   7533         url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4068086\%2F4068087\%2F04068105.pdf\%3Farnumber\%3D4068105\&authDecision=-203}
   7534 }
   7535 @conference {Atallah2006,
   7536         title = {Secure Collaborative Planning, Forecasting, and Replenishment},
   7537         booktitle = {Proceedings of Multi-Echelon/Public Applications of Supply Chain Management Conference},
   7538         year = {2006},
   7539         note = {only published on CD},
   7540         pages = {1--52},
        abstract = {Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to sharing their {\textquotedblleft}private{\textquotedblright} information due to fear of adverse impact of information leakage.
This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demand signal (e.g., promotions) and cost information to one another. In our collaborative CPFR scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailer's and the supplier's observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for
collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the other's future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation},
   7544         www_section = {chain computation management, CPFR, privacy, secure multi-party computation, secure supply, security, SMC},
   7545         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf},
   7546         author = {Atallah, Mikhail and Marina Blanton and Vinayak Deshpand and Frikken, Keith and Li, Jiangtao and Leroy Schwarz}
   7547 }
   7548 @article {brands06,
   7549         title = {Secure User Identification Without Privacy Erosion},
   7550         journal = {University of Ottawa Law \& Technology Journal},
   7551         volume = {3},
   7552         year = {2006},
   7553         pages = {205--223},
   7554         abstract = {Individuals are increasingly confronted with requests to identify themselves when accessing services provided by government organizations, companies, and other service providers. At the same time, traditional transaction mechanisms are increasingly being replaced by electronic mechanisms that underneath their hood automatically capture and record globally unique identifiers. Taken together, these interrelated trends are currently eroding the privacy and security of individuals in a manner unimaginable just a few decades ago. Privacy activists are facing an increasingly hopeless battle against new privacy-invasive identification initiatives: the cost of computerized identification systems is rapidly going down, their accuracy and efficiency is improving all the time, much of the required data communication infrastructure is now in place, forgery of non-electronic user credentials is getting easier all the time, and data sharing imperatives have gone up dramatically. This paper argues that the privacy vs. identification debate should be moved into less polarized territory. Contrary to popular misbelief, identification and privacy are not opposite interests that need to be balanced: the same technological advances that threaten to annihilate privacy can be exploited to save privacy in an electronic age. The aim of this paper is to clarify that premise on the basis of a careful analysis of the concept of user identification itself. Following an examination of user identifiers and its purposes, I classify identification technologies in a manner that enables their privacy and security implications to be clearly articulated and contrasted. I also include an overview of a modern privacy-preserving approach to user identification},
   7555         www_section = {authentication, cryptography, data sharing, privacy},
   7556         url = {http://papers.ssrn.com/sol3/papers.cfm?abstract_id=999695},
   7557         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/brands06.pdf},
   7558         author = {Stefan Brands}
   7559 }
   7560 @conference {kutzner06securessr,
   7561         title = {Securing the Scalable Source Routing Protocol},
   7562         booktitle = {Proceedings of the World Telecommunications Congress 2006},
   7563         year = {2006},
   7564         type = {publication},
   7565         address = {Budapest, Hungary},
   7566         abstract = {The Scalable Source Routing (SSR) protocol combines overlay-like routing in a virtual network structure with source routing in the physical network to a single cross-layer architecture. Thereby, it can provide indirect routing in networks that lack a well-crafted structure. SSR is well suited for mobile ad hoc networks, sensor-actuator networks, and especially for mesh networks. Moreover, SSR directly provides the routing semantics of a structured routing overlay, making it an efficient basis for the scalable implementation of fully decentralized applications. In this paper we analyze SSR with regard to security: We show where SSR is prone to attacks, and we describe protocol modifications that make SSR robust in the presence of malicious nodes. The core idea is to introduce cryptographic certificates that allow nodes to discover forged protocol messages. We evaluate our proposed modifications by means of simulations, and thus demonstrate that they are both effective and efficient},
   7567         www_section = {cryptography, scalable source routing, sensor networks},
   7568         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   7569         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner06securessr.pdf},
   7570         author = {Kendy Kutzner and Christian Wallenta and Thomas Fuhrmann}
   7571 }
   7572 @conference {1158641,
   7573         title = {Security Considerations in Space and Delay Tolerant Networks},
   7574         booktitle = {SMC-IT '06: Proceedings of the 2nd IEEE International Conference on Space Mission Challenges for Information Technology},
   7575         year = {2006},
   7576         pages = {29--38},
   7577         publisher = {IEEE Computer Society},
   7578         organization = {IEEE Computer Society},
   7579         address = {Washington, DC, USA},
   7580         abstract = {This paper reviews the Internet-inspired security work on delay tolerant networking, in particular, as it might apply to space missions, and identifies some challenges arising, for both the Internet security community and for space missions. These challenges include the development of key management schemes suited for space missions as well as a characterization of the actual security requirements applying. A specific goal of this paper is therefore to elicit feedback from space mission IT specialists in order to guide the development of security mechanisms for delay tolerant networking},
   7581         isbn = {0-7695-2644-6},
   7582         doi = {10.1109/SMC-IT.2006.66},
   7583         url = {http://portal.acm.org/citation.cfm?id=1158336.1158641$\#$},
   7584         author = {Farrell, Stephen and Cahill, Vinny}
   7585 }
   7586 @conference {tap:pet2006,
   7587         title = {On the Security of the Tor Authentication Protocol},
   7588         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   7589         year = {2006},
   7590         month = {June},
   7591         pages = {316--331},
   7592         publisher = {Springer},
   7593         organization = {Springer},
   7594         address = {Cambridge, UK},
   7595         abstract = {Tor is a popular anonymous Internet communication system, used by an estimated 250,000 users to anonymously exchange over five terabytes of data per day. The security of Tor depends on properly authenticating nodes to clients, but Tor uses a custom protocol, rather than an established one, to perform this authentication. In this paper, we provide a formal proof of security of this protocol, in the random oracle model, under reasonable cryptographic assumptions},
   7596         www_section = {Tor},
   7597         isbn = {978-3-540-68790-0},
   7598         doi = {10.1007/11957454},
   7599         url = {http://www.springerlink.com/content/n77w19002743xu51/},
   7600         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tap-pet2006.pdf},
   7601         author = {Ian Goldberg},
   7602         editor = {George Danezis and Philippe Golle}
   7603 }
   7604 @conference {Karnstedt2006SimilarityQueries,
   7605         title = {Similarity Queries on Structured Data in Structured Overlays},
   7606         booktitle = {Proceedings of the 22nd International Conference on Data Engineering Workshops},
   7607         series = {ICDEW '06},
   7608         year = {2006},
   7609         pages = {0--32},
   7610         publisher = {IEEE Computer Society},
   7611         organization = {IEEE Computer Society},
   7612         address = {Washington, DC, USA},
   7613         isbn = {0-7695-2571-7},
   7614         doi = {10.1109/ICDEW.2006.137},
   7615         url = {http://dx.doi.org/10.1109/ICDEW.2006.137},
   7616         author = {Karnstedt, Marcel and Sattler, Kai-Uwe and Manfred Hauswirth and Roman Schmidt}
   7617 }
   7618 @proceedings {2006_19,
   7619         booktitle = {Software Engineering for Ambient Intelligence Systems},
   7620         year = {2006},
   7621         abstract = {AmbiComp is a new research project that will invest about 30 person years into the development of a new and simple software engineering approach for mobile embedded interactive systems. In order to achieve its ambitious goals, it will combine research from different fields such as mobile peer-to-peer networks and operating systems. As a result, developing applications across multiple embedded devices shall be greatly facilitated},
   7622         www_section = {P2P},
   7623         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/meis-paper04.pdf},
   7624         editor = {unknown},
   7625         author = {Thomas Fuhrmann}
   7626 }
   7627 @conference {1170307,
   7628         title = {Storage Tradeoffs in a Collaborative Backup Service for Mobile Devices},
   7629         booktitle = {EDCC '06: Proceedings of the Sixth European Dependable Computing Conference},
   7630         year = {2006},
   7631         pages = {129--138},
   7632         publisher = {IEEE Computer Society},
   7633         organization = {IEEE Computer Society},
   7634         address = {Washington, DC, USA},
        abstract = {Mobile devices are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. We consider a fault-tolerance approach that exploits spontaneous interactions to implement a collaborative backup service. We define the constraints implied by the mobile environment, analyze how they translate into the storage layer of such a backup system and examine various design options. The paper concludes with a presentation of our prototype implementation of the storage layer, an evaluation of the impact of several compression methods, and directions for future work},
   7636         isbn = {0-7695-2648-9},
   7637         doi = {10.1109/EDCC.2006.26},
   7638         url = {http://portal.acm.org/citation.cfm?id=1170307$\#$},
   7639         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/slides.pdf},
   7640         author = {Ludovic Court{\`e}s and Killijian, Marc-Olivier and Powell, David}
   7641 }
@techreport {Levine:2006,
   7643         title = {A Survey of Solutions to the Sybil Attack},
   7644         number = {2006-052},
   7645         year = {2006},
   7646         month = oct,
   7647         institution = {University of Massachusetts Amherst},
   7648         type = {Tech report},
   7649         address = {Amherst, MA},
   7650         abstract = {Many security mechanisms are based on specific assumptions of identity and are vulnerable to attacks when these assumptions are violated. For example, impersonation is the well-known consequence when authenticating credentials are stolen by a third party. Another attack on identity occurs when credentials for one identity are purposely shared by multiple individuals, for example to avoid paying twice for a service. In this paper, we survey the impact of the Sybil attack, an attack against identity in which an individual entity masquerades as multiple simultaneous identities. The Sybil attack is a fundamental problem in many systems, and it has so far resisted a universally applicable solution},
   7652         www_section = {anonymity, security, Sybil attack},
   7653         url = {http://prisms.cs.umass.edu/brian/pubs/levine.sybil.tr.2006.pdf},
   7654         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20A\%20Survey\%20of\%20Solutions\%20to\%20the\%20Sybil\%20Attack.pdf},
   7655         author = {Brian Neil Levine and Clay Shields and Margolin, N. Boris}
   7656 }
   7657 @article {2006_20,
   7658         title = {A survey on networking games in telecommunications},
   7659         journal = {Computers \& Operations Research},
   7660         volume = {33},
   7661         year = {2006},
   7662         month = feb,
   7663         pages = {286--311},
   7664         publisher = {Elsevier},
   7665         abstract = {In this survey, we summarize different modeling and solution concepts of networking games, as well as a number of different applications in telecommunications that make use of or can make use of networking games. We identify some of the mathematical challenges and methodologies that are involved in these problems. We include here work that has relevance to networking games in telecommunications from other areas, in particular from transportation planning},
   7666         www_section = {communication network, game theory},
   7667         doi = {10.1016/j.cor.2004.06.005},
   7668         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/netgames.pdf},
   7669         author = {Eitan Altman and Thomas Boulogne and Rachid El-Azouzi and Tania Jim{\'e}nez and Laura Wynter}
   7670 }
   7671 @conference {Yu:2006:SDA:1159913.1159945,
   7672         title = {SybilGuard: defending against sybil attacks via social networks},
   7673         booktitle = {SIGCOMM'06. Proceedings of the 2006 conference on Applications, Technologies, Architectures, and Protocols for Computer Communications},
   7674         series = {SIGCOMM '06},
   7675         year = {2006},
   7676         month = sep,
   7677         pages = {267--278},
   7678         publisher = {ACM},
   7679         organization = {ACM},
   7680         address = {Pisa, Italy},
        abstract = {Peer-to-peer and other decentralized, distributed systems are known to be particularly vulnerable to sybil attacks. In a sybil attack, a malicious user obtains multiple fake identities and pretends to be multiple, distinct nodes in the system. By controlling a large fraction of the nodes in the system, the malicious user is able to "out vote" the honest users in collaborative tasks such as Byzantine failure defenses. This paper presents SybilGuard, a novel protocol for limiting the corruptive influences of sybil attacks. Our protocol is based on the "social network" among user identities, where an edge between two identities indicates a human-established trust relationship. Malicious users can create many identities but few trust relationships. Thus, there is a disproportionately-small "cut" in the graph between the sybil nodes and the honest nodes. SybilGuard exploits this property to bound the number of identities a malicious user can create. We show the effectiveness of SybilGuard both analytically and experimentally},
   7682         www_section = {social networks, Sybil attack, sybilGuard},
   7683         isbn = {1-59593-308-5},
   7684         doi = {http://doi.acm.org/10.1145/1159913.1159945},
   7685         url = {http://doi.acm.org/10.1145/1159913.1159945},
   7686         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2706\%20-\%20SybilGuard.pdf},
   7687         author = {Yu, Haifeng and Kaminsky, Michael and Gibbons, Phillip B. and Flaxman, Abraham}
   7688 }
   7689 @article {citeulike:530977,
   7690         title = {Taxonomy of trust: Categorizing P2P reputation systems},
        journal = {Computer Networks},
   7692         volume = {50},
   7693         number = {4},
   7694         year = {2006},
   7695         month = mar,
   7696         pages = {472--484},
   7697         abstract = {The field of peer-to-peer reputation systems has exploded in the last few years. Our goal is to organize existing ideas and work to facilitate system design. We present a taxonomy of reputation system components, their properties, and discuss how user behavior and technical constraints can conflict. In our discussion, we describe research that exemplifies compromises made to deliver a useable, implementable system},
   7698         www_section = {P2P, trust},
   7699         doi = {10.1016/j.comnet.2005.07.011},
   7700         url = {http://portal.acm.org/citation.cfm?id=1139713},
   7701         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Marti-ElsevierScienceSubmitted05_0.pdf},
   7702         author = {Marti, Sergio and Hector Garcia-Molina}
   7703 }
   7704 @conference {ShWa-Timing06,
   7705         title = {Timing Analysis in Low-Latency Mix Networks: Attacks and Defenses},
   7706         booktitle = {Proceedings of ESORICS 2006},
   7707         year = {2006},
   7708         month = {September},
   7709         publisher = {Springer Berlin / Heidelberg},
   7710         organization = {Springer Berlin / Heidelberg},
   7711         abstract = {Mix networks are a popular mechanism for anonymous Internet communications. By routing IP traffic through an overlay chain of mixes, they aim to hide the relationship between its origin and destination. Using a realistic model of interactive Internet traffic, we study the problem of defending low-latency mix networks against attacks based on correlating inter-packet intervals on two or more links of the mix chain. We investigate several attack models, including an active attack which involves adversarial modification of packet flows in order to {\textquotedblleft}fingerprint{\textquotedblright} them, and analyze the tradeoffs between the amount of cover traffic, extra latency, and anonymity properties of the mix network. We demonstrate that previously proposed defenses are either ineffective, or impose a prohibitively large latency and/or bandwidth overhead on communicating applications. We propose a new defense based on adaptive padding},
   7712         www_section = {anonymity},
   7713         isbn = {978-3-540-44601-9},
   7714         doi = {10.1007/11863908},
   7715         url = {http://www.springerlink.com/content/3n136578m4211484/},
   7716         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ShWa-Timing06.pdf},
        author = {Vitaly Shmatikov and Ming-Hsiu Wang}
   7718 }
   7719 @conference {conf/infocom/SunHYL06,
   7720         title = {A Trust Evaluation Framework in Distributed Networks: Vulnerability Analysis and Defense Against Attacks},
   7721         booktitle = {INFOCOM},
   7722         year = {2006},
   7723         publisher = {IEEE},
   7724         organization = {IEEE},
   7725         abstract = {Evaluation of trustworthiness of participating entities is an effective method to stimulate collaboration and improve network security in distributed networks. Similar to other security related protocols, trust evaluation is an attractive target for adversaries. Currently, the vulnerabilities of trust evaluation system have not been well understood. In this paper, we present several attacks that can undermine the accuracy of trust evaluation, and then develop defense techniques. Based on our investigation on attacks and defense, we implement a trust evaluation system in ad hoc networks for securing ad hoc routing and assisting malicious node detection. Extensive simulations are performed to illustrate various attacks, the effectiveness of the proposed defense techniques, and the overall performance of the trust evaluation system},
   7726         www_section = {ad-hoc networks},
   7727         isbn = {1-4244-0349-9 },
   7728         doi = {10.1109/CISS.2006.286695 },
   7729         url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2006.html$\#$SunHYL06},
   7730         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trust_infocom06_v4.pdf},
   7731         author = {Yan L. Sun and Zhu Han and Wei Yu and K. J. Ray Liu}
   7732 }
   7733 @book {2006_21,
   7734         title = {Unconditionally Secure Constant-Rounds Multi-party Computation for Equality, Comparison, Bits and Exponentiation},
   7735         booktitle = {Theory of Cryptography},
   7736         series = {Lecture Notes in Computer Science},
   7737         volume = {3876},
   7738         year = {2006},
   7739         pages = {285--304},
   7740         publisher = {Springer Berlin Heidelberg},
   7741         organization = {Springer Berlin Heidelberg},
        abstract = {We show that if a set of players hold shares of a value $a \in \mathbb{F}_p$ for some prime $p$ (where the set of shares is written $[a]_p$), it is possible to compute, in constant rounds and with unconditional security, sharings of the bits of $a$, i.e., compute sharings $[a_0]_p, \ldots, [a_{l-1}]_p$ such that $l = \lceil \log_2 p \rceil$, $a_0, \ldots, a_{l-1} \in \{0,1\}$ and $a = \sum_{i=0}^{l-1} a_i 2^i$. Our protocol is secure against active adversaries and works for any linear secret sharing scheme with a multiplication protocol. The complexity of our protocol is $O(l \log l)$ invocations of the multiplication protocol for the underlying secret sharing scheme, carried out in $O(1)$ rounds.
This result immediately implies solutions to other long-standing open problems such as constant-rounds and unconditionally secure protocols for deciding whether a shared number is zero, comparing shared numbers, raising a shared number to a shared exponent and reducing a shared number modulo a shared modulus},
   7744         isbn = {978-3-540-32731-8},
   7745         doi = {10.1007/11681878_15},
   7746         url = {http://dx.doi.org/10.1007/11681878_15},
   7747         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecure2006Damgard.pdf},
        author = {Damg{\'a}rd, Ivan and Fitzi, Matthias and Kiltz, Eike and Nielsen, Jesper Buus and Toft, Tomas},
   7749         editor = {Halevi, Shai and Rabin, Tal}
   7750 }
   7751 @conference {Stutzbach:2006:UCP:1177080.1177105,
   7752         title = {Understanding churn in peer-to-peer networks},
   7753         booktitle = {IMC'06. Proceedings of the 6th ACM SIGCOMM Conference on Internet Measurement},
   7754         series = {IMC '06},
   7755         year = {2006},
   7756         month = oct,
   7757         pages = {189--202},
   7758         publisher = {ACM},
   7759         organization = {ACM},
   7760         address = {Rio de Janeriro, Brazil},
        abstract = {The dynamics of peer participation, or churn, are an inherent property of Peer-to-Peer (P2P) systems and critical for design and evaluation. Accurately characterizing churn requires precise and unbiased information about the arrival and departure of peers, which is challenging to acquire. Prior studies show that peer participation is highly dynamic but with conflicting characteristics. Therefore, churn remains poorly understood, despite its significance. In this paper, we identify several common pitfalls that lead to measurement error. We carefully address these difficulties and present a detailed study using three widely-deployed P2P systems: an unstructured file-sharing system (Gnutella), a content-distribution system (BitTorrent), and a Distributed Hash Table (Kad). Our analysis reveals several properties of churn: (i) overall dynamics are surprisingly similar across different systems, (ii) session lengths are not exponential, (iii) a large portion of active peers are highly stable while the remaining peers turn over quickly, and (iv) peer session lengths across consecutive appearances are correlated. In summary, this paper advances our understanding of churn by improving accuracy, comparing different P2P file sharing/distribution systems, and exploring new aspects of churn},
   7762         www_section = {BitTorrent, churn, Gnutella, KAD, peer-to-peer networking, session length, uptime},
   7763         isbn = {1-59593-561-4},
   7764         doi = {http://doi.acm.org/10.1145/1177080.1177105},
   7765         url = {http://doi.acm.org/10.1145/1177080.1177105},
   7766         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2706\%20-\%20Understanding\%20churn\%20in\%20p2p\%20networks.pdf},
   7767         author = {Stutzbach, Daniel and Rejaie, Reza}
   7768 }
   7769 @conference {valet:pet2006,
   7770         title = {Valet Services: Improving Hidden Servers with a Personal Touch},
   7771         booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET 2006)},
   7772         year = {2006},
   7773         month = {June},
   7774         pages = {223--244},
   7775         publisher = {Springer},
   7776         organization = {Springer},
   7777         address = {Cambridge, UK},
   7778         abstract = {Location hidden services have received increasing attention as a means to resist censorship and protect the identity of service operators. Research and vulnerability analysis to date has mainly focused on how to locate the hidden service. But while the hiding techniques have improved, almost no progress has been made in increasing the resistance against DoS attacks directly or indirectly on hidden services. In this paper we suggest improvements that should be easy to adopt within the existing hidden service design, improvements that will both reduce vulnerability to DoS attacks and add QoS as a service option. In addition we show how to hide not just the location but the existence of the hidden service from everyone but the users knowing its service address. Not even the public directory servers will know how a private hidden service can be contacted, or know it exists},
   7779         www_section = {censorship resistance, information hiding},
   7780         isbn = {978-3-540-68790-0},
   7781         doi = {10.1007/11957454},
   7782         url = {http://www.springerlink.com/content/d58607007777r8l1/},
   7783         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/valet-pet2006.pdf},
   7784         author = {Lasse {\O}verlier and Paul Syverson},
   7785         editor = {George Danezis and Philippe Golle}
   7786 }
   7787 @article {nguyen2006vsf,
   7788         title = {Verifiable shuffles: a formal model and a Paillier-based three-round construction with provable security},
   7789         journal = {International Journal of Information Security},
   7790         volume = {5},
   7791         number = {4},
   7792         year = {2006},
   7793         pages = {241--255},
   7794         publisher = {Springer},
        abstract = {A shuffle takes a list of ciphertexts and outputs a permuted list of re-encryptions of the input ciphertexts. Mix-nets, a popular method for anonymous routing, can be constructed from a sequence of shuffles and decryption. We propose a formal model for security of verifiable shuffles and a new verifiable shuffle system based on the Paillier encryption scheme, and prove its security in the proposed model. The model is general and can be extended to provide provable security for verifiable shuffle decryption},
   7796         www_section = {formal security model, paillier public-key system, privacy, verifiable shuffles},
   7797         issn = {1615-5262},
   7798         doi = {10.1007/s10207-006-0004-8},
   7799         url = {http://portal.acm.org/citation.cfm?id=1164438},
   7800         author = {Lan Nguyen and Rei Safavi-Naini and Kaoru Kurosawa}
   7801 }
   7802 @booklet {Widmer_abstractnetwork,
        title = {Network Coding for Efficient Communication in Extreme Networks},
   7804         year = {2005},
        abstract = {Some forms of ad-hoc networks need to operate in extremely performance-challenged environments where end-to-end connectivity is rare. Such environments can be found for example in very sparse mobile networks where nodes {\textquotedblleft}meet{\textquotedblright} only occasionally and are able to exchange information, or in wireless sensor networks where nodes sleep most of the time to conserve energy. Forwarding mechanisms in such networks usually resort to some form of intelligent flooding, as for example in probabilistic routing. We propose a communication algorithm that significantly reduces the overhead of probabilistic routing algorithms, making it a suitable building block for a delay-tolerant network architecture. Our forwarding scheme is based on network coding. Nodes do not simply forward packets they overhear but may send out information that is coded over the contents of several packets they received. We show by simulation that this algorithm achieves the reliability and robustness of flooding at a small fraction of the overhead},
   7806         www_section = {ad-hoc networks, delay tolerant network, routing},
   7807         isbn = {1-59593-026-4},
   7808         doi = {10.1145/1080139.1080147},
   7809         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.5368},
   7810         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.5368.pdf},
   7811         author = {J{\"o}rg Widmer}
   7812 }
   7813 @conference {pet05-borisov,
   7814         title = {An Analysis of Parallel Mixing with Attacker-Controlled Inputs},
   7815         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
   7816         year = {2005},
   7817         month = {May},
   7818         pages = {12--25},
   7819         publisher = {Springer Berlin / Heidelberg},
   7820         organization = {Springer Berlin / Heidelberg},
   7821         abstract = {Parallel mixing [7] is a technique for optimizing the latency of a synchronous re-encryption mix network. We analyze the anonymity of this technique when an adversary can learn the output positions of some of the inputs to the mix network. Using probabilistic modeling, we show that parallel mixing falls short of achieving optimal anonymity in this case. In particular, when the number of unknown inputs is small, there are significant anonymity losses in the expected case. This remains true even if all the mixes in the network are honest, and becomes worse as the number of mixes increases. We also consider repeatedly applying parallel mixing to the same set of inputs. We show that an attacker who knows some input--output relationships will learn new information with each mixing and can eventually link previously unknown inputs and outputs},
   7822         www_section = {anonymity, mix},
   7823         isbn = {978-3-540-34745-3},
   7824         doi = {10.1007/11767831},
   7825         url = {http://www.springerlink.com/content/b0t0714165846m42/},
   7826         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-borisov.pdf},
   7827         author = {Borisov, Nikita}
   7828 }
@phdthesis {DiazThesis05,
   7830         title = {Anonymity and Privacy in Electronic Services},
   7831         year = {2005},
   7832         month = {December},
   7833         school = {Katholieke Universiteit Leuven},
        type = {PhD thesis},
   7835         address = {Leuven, Belgium},
   7836         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DiazThesis05.pdf},
   7837         author = {Claudia Diaz}
   7838 }
   7839 @booklet {Borisov:CSD-05-1390,
   7840         title = {Anonymity in Structured Peer-to-Peer Networks},
   7841         number = {UCB/CSD-05-1390},
   7842         year = {2005},
   7843         month = {May},
   7844         publisher = {EECS Department, University of California, Berkeley},
        abstract = {Existing peer-to-peer systems that aim to provide anonymity to their users are based on networks with unstructured or loosely-structured routing algorithms. Structured routing offers performance and robustness guarantees that these systems are unable to achieve. We therefore investigate adding anonymity support to structured peer-to-peer networks. We apply an entropy-based anonymity metric to Chord and use this metric to quantify the improvements in anonymity afforded by several possible extensions. We identify particular properties of Chord that have the strongest effect on anonymity and propose a routing extension that allows a general trade-off between anonymity and performance. Our results should be applicable to other structured peer-to-peer systems},
   7846         url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2005/6509.html},
   7847         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-05-1390.pdf},
   7848         author = {Borisov, Nikita and Waddle, Jason}
   7849 }
   7850 @conference {sofem05-Klonowski,
   7851         title = {Anonymous Communication with On-line and Off-line Onion Encoding},
   7852         booktitle = {Proceedings of Conference on Current Trends in Theory and Practice of Informatics (SOFSEM 2005)},
   7853         year = {2005},
   7854         month = jan,
   7855         publisher = {Springer Berlin / Heidelberg},
   7856         organization = {Springer Berlin / Heidelberg},
        abstract = {Anonymous communication with onions requires that a user application determines the whole routing path of an onion. This scenario has certain disadvantages: it might be dangerous in some situations, and it does not fit well with the current layered architecture of dynamic communication networks.
   7858 We show that applying encoding based on universal re-encryption can solve many of these problems by providing much flexibility -- the onions can be created on-the-fly or in advance by different parties},
   7859         www_section = {onion routing, universal re-encryption},
   7860         isbn = {978-3-540-24302-1},
   7861         doi = {10.1007/b105088},
   7862         url = {http://www.springerlink.com/content/9023b6ad0thaf51p/},
   7863         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sofem05-Klonowski.pdf},
   7864         author = {Marek Klonowski and Miroslaw Kutylowski and Filip Zagorski}
   7865 }
   7866 @conference {1080833,
   7867         title = {Architecture and evaluation of an unplanned 802.11b mesh network},
   7868         booktitle = {MobiCom '05: Proceedings of the 11th annual international conference on Mobile computing and networking},
   7869         year = {2005},
   7870         pages = {31--42},
   7871         publisher = {ACM},
   7872         organization = {ACM},
   7873         address = {New York, NY, USA},
        abstract = {This paper evaluates the ability of a wireless mesh architecture to provide high performance Internet access while demanding little deployment planning or operational management. The architecture considered in this paper has unplanned node placement (rather than planned topology), omni-directional antennas (rather than directional links), and multi-hop routing (rather than single-hop base stations). These design decisions contribute to ease of deployment, an important requirement for community wireless networks. However, this architecture carries the risk that lack of planning might render the network's performance unusably low. For example, it might be necessary to place nodes carefully to ensure connectivity; the omni-directional antennas might provide uselessly short radio ranges; or the inefficiency of multi-hop forwarding might leave some users effectively disconnected. The paper evaluates this unplanned mesh architecture with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37 nodes spread over four square kilometers of an urban area. The network provides users with usable performance despite lack of planning: the average inter-node throughput is 627 kbits/second, even though the average route has three hops. The paper evaluates multiple aspects of the architecture: the effect of node density on connectivity and throughput; the characteristics of the links that the routing protocol elects to use; the usefulness of the highly connected mesh afforded by omni-directional antennas for robustness and throughput; and the potential performance of a single-hop network using the same nodes as Roofnet},
   7875         www_section = {ad-hoc networks, mesh networks, multi-hop networks, route metrics, wireless routing},
   7876         isbn = {1-59593-020-5},
   7877         doi = {10.1145/1080829.1080833},
   7878         url = {http://portal.acm.org/citation.cfm?id=1080833$\#$},
   7879         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.62.3119.pdf},
   7880         author = {Bicket, John and Aguayo, Daniel and Biswas, Sanjit and Robert Morris}
   7881 }
   7882 @article {1095816,
   7883         title = {BAR fault tolerance for cooperative services},
   7884         journal = {SIGOPS Oper. Syst. Rev},
   7885         volume = {39},
   7886         number = {5},
   7887         year = {2005},
   7888         pages = {45--58},
   7889         publisher = {ACM},
   7890         address = {New York, NY, USA},
   7891         abstract = {This paper describes a general approach to constructing cooperative services that span multiple administrative domains. In such environments, protocols must tolerate both Byzantine behaviors when broken, misconfigured, or malicious nodes arbitrarily deviate from their specification and rational behaviors when selfish nodes deviate from their specification to increase their local benefit. The paper makes three contributions: (1) It introduces the BAR (Byzantine, Altruistic, Rational) model as a foundation for reasoning about cooperative services; (2) It proposes a general three-level architecture to reduce the complexity of building services under the BAR model; and (3) It describes an implementation of BAR-B the first cooperative backup service to tolerate both Byzantine users and an unbounded number of rational users. At the core of BAR-B is an asynchronous replicated state machine that provides the customary safety and liveness guarantees despite nodes exhibiting both Byzantine and rational behaviors. Our prototype provides acceptable performance for our application: our BAR-tolerant state machine executes 15 requests per second, and our BAR-B backup service can back up 100MB of data in under 4 minutes},
   7892         www_section = {byzantine fault tolerance, game theory, reliability},
   7893         issn = {0163-5980},
   7894         doi = {10.1145/1095809.1095816},
   7895         url = {http://portal.acm.org/citation.cfm?id=1095816$\#$},
   7896         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.80.713.pdf},
   7897         author = {Aiyer, Amitanand S. and Lorenzo Alvisi and Clement, Allen and Dahlin, Mike and Martin, Jean-Philippe and Porth, Carl}
   7898 }
   7899 @conference {Pouwelse05thebittorrent,
   7900         title = {The BitTorrent P2P File-sharing System: Measurements and Analysis},
   7901         booktitle = {IPTPS'05. Proceedings of the 4th International Workshop on Peer-To-Peer Systems},
   7902         series = {Lecture Notes in Computer Science},
   7903         volume = {3640},
   7904         year = {2005},
   7905         month = feb,
   7906         pages = {205--216},
   7907         publisher = {Springer},
   7908         organization = {Springer},
   7909         address = {Ithaca, NY, USA},
   7910         abstract = {Of the many P2P file-sharing prototypes in existence, BitTorrent is one of the few that has managed to attract millions of users. BitTorrent relies on other (global) components for file search, employs a moderator system to ensure the integrity of file data, and uses a bartering technique for downloading in order to prevent users from freeriding. In this paper we present a measurement study of BitTorrent in which we focus on four issues, viz. availability, integrity, flashcrowd handling, and download performance. The purpose of this paper is to aid in the understanding of a real P2P system that apparently has the right mechanisms to attract a large user community, to provide measurement data that may be useful in modeling P2P systems, and to identify design issues in such systems},
   7911         www_section = {BitTorrent, file-sharing},
   7912         doi = {10.1007/11558989_19},
   7913         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2705\%20-\%20The\%20BitTorrent\%3A\%20measurements\%20and\%20analysis.pdf},
   7914         author = {Johan Pouwelse and Garbacki, Pawel and Epema, Dick H. J. and Henk J. Sips}
   7915 }
   7916 @conference {ih05-Luke,
   7917         title = {On Blending Attacks For Mixes with Memory},
   7918         booktitle = {Proceedings of Information Hiding Workshop (IH 2005)},
   7919         year = {2005},
   7920         month = {June},
   7921         pages = {39--52},
   7922         publisher = {Springer Berlin / Heidelberg},
   7923         organization = {Springer Berlin / Heidelberg},
   7924         abstract = {Blending attacks are a general class of traffic-based attacks, exemplified by the (n--1)-attack. Adding memory or pools to mixes mitigates against such attacks, however there are few known quantitative results concerning the effect of pools on blending attacks. In this paper we give a precise analysis of the number of rounds required to perform an (n--1)-attack on the pool mix, timed pool mix, timed dynamic pool mix and the binomial mix},
   7925         www_section = {mix, traffic analysis},
   7926         isbn = {978-3-540-29039-1},
   7927         doi = {10.1007/11558859},
   7928         url = {http://www.springerlink.com/index/y78350424h77u578.pdf},
   7929         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-Luke.pdf},
   7930         author = {Luke O'Connor}
   7931 }
   7932 @conference {1108067,
   7933         title = {Boundary Chord: A Novel Peer-to-Peer Algorithm for Replica Location Mechanism in Grid Environment},
   7934         booktitle = {ISPAN '05: Proceedings of the 8th International Symposium on Parallel Architectures, Algorithms and Networks},
   7935         year = {2005},
   7936         pages = {262--267},
   7937         publisher = {IEEE Computer Society},
   7938         organization = {IEEE Computer Society},
   7939         address = {Washington, DC, USA},
   7940         abstract = {The emerging grids need an efficient replica location mechanism. In the experience of developing ChinaGrid Supporting Platform (CGSP), a grid middleware that builds a uniform platform supporting multiple grid-based applications, we meet a challenge of utilizing the properties of locality in the replica location process to construct a practical and high performance replica location mechanism. The key of the solution to this challenge is to design an efficient replica location algorithm that meets the above requirements. Some previous works have been done to build a replica location mechanism, but they are not suitable for replica location in a grid environment with multiple applications like ChinaGrid. In this paper, we present a novel peer-to-peer algorithm for replica location mechanism, Boundary Chord, which has the merits of locality awareness, self-organization, and load balancing. Simulation results show that the algorithm has better performance than other structured peer-to-peer solutions to the replica location problem},
   7941         isbn = {0-7695-2509-1},
   7942         doi = {10.1109/ISPAN.2005.21},
   7943         url = {http://portal.acm.org/citation.cfm?id=1108067$\#$},
   7944         author = {Jin, Hai and Wang, Chengwei and Chen, Hanhua}
   7945 }
   7946 @article {Pfister05capacity-achievingensembles,
   7947         title = {Capacity-achieving ensembles for the binary erasure channel with bounded complexity},
   7948         journal = {IEEE Transactions on Information Theory},
   7949         volume = {51},
   7950         number = {7},
   7951         year = {2005},
   7952         pages = {2352--2379},
   7953         abstract = {We present two sequences of ensembles of nonsystematic irregular repeat--accumulate (IRA) codes which asymptotically (as their block length tends to infinity) achieve capacity on the binary erasure channel (BEC) with bounded complexity per information bit. This is in contrast to all previous constructions of capacity-achieving sequences of ensembles whose complexity grows at least like the log of the inverse of the gap (in rate) to capacity. The new bounded complexity result is achieved by puncturing bits, and allowing in this way a sufficient number of state nodes in the Tanner graph representing the codes. We derive an information-theoretic lower bound on the decoding complexity of randomly punctured codes on graphs. The bound holds for every memoryless binary-input output-symmetric (MBIOS) channel and is refined for the binary erasure channel},
   7954         www_section = {BEC, coding theory, IRA, MBIOS},
   7955         isbn = {0-7803-8280-3},
   7956         doi = {10.1109/ISIT.2004.1365246},
   7957         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3798},
   7958         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0409026v1.pdf},
   7959         author = {Henry D. Pfister and Igal Sason and R{\"u}diger L. Urbanke}
   7960 }
   7961 @conference {Zhuang05cashmere:resilient,
   7962         title = {Cashmere: Resilient anonymous routing},
   7963         booktitle = {Proceedings of NSDI},
   7964         year = {2005},
   7965         publisher = {ACM/USENIX},
   7966         organization = {ACM/USENIX},
   7967         abstract = {Anonymous routing protects user communication from identification by third-party observers. Existing anonymous routing layers utilize Chaum-Mixes for anonymity by relaying traffic through relay nodes called mixes. The source defines a static forwarding path through which traffic is relayed to the destination. The resulting path is fragile and short-lived: failure of one mix in the path breaks the forwarding path and results in data loss and jitter before a new path is constructed. In this paper, we propose Cashmere, a resilient anonymous routing layer built on a structured peer-to-peer overlay. Instead of single-node mixes, Cashmere selects regions in the overlay namespace as mixes. Any node in a region can act as the MIX, drastically reducing the probability of a mix failure. We analyze Cashmere's anonymity and measure its performance through simulation and measurements, and show that it maintains high anonymity while providing orders of magnitude improvement in resilience to network dynamics and node failures},
   7968         url = {http://portal.acm.org/citation.cfm?id=1251203.1251225$\#$},
   7969         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cashmere.pdf},
   7970         author = {Li Zhuang and Feng Zhou and Ben Y. Zhao and Antony Rowstron}
   7971 }
   7972 @conference {ih05-csispir,
   7973         title = {Censorship Resistance Revisited},
   7974         booktitle = {Proceedings of Information Hiding Workshop (IH 2005)},
   7975         year = {2005},
   7976         month = {June},
   7977         pages = {62--76},
   7978         publisher = {Springer Berlin / Heidelberg},
   7979         organization = {Springer Berlin / Heidelberg},
   7980         abstract = {{\textquotedblleft}Censorship resistant{\textquotedblright} systems attempt to prevent censors from imposing a particular distribution of content across a system. In this paper, we introduce a variation of censorship resistance (CR) that is resistant to selective filtering even by a censor who is able to inspect (but not alter) the internal contents and computations of each data server, excluding only the server's private signature key. This models a service provided by operators who do not hide their identities from censors. Even with such a strong adversarial model, our definition states that CR is only achieved if the censor must disable the entire system to filter selected content. We show that existing censorship resistant systems fail to meet this definition; that Private Information Retrieval (PIR) is necessary, though not sufficient, to achieve our definition of CR; and that CR is achieved through a modification of PIR for which known implementations exist},
   7981         www_section = {censorship resistance, private information retrieval},
   7982         isbn = {978-3-540-29039-1},
   7983         doi = {10.1007/11558859},
   7984         url = {http://www.springerlink.com/content/f08707qw34614340/},
   7985         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-csispir.pdf},
   7986         author = {Ginger Perng and Michael K. Reiter and Chenxi Wang}
   7987 }
   7988 @conference {2005_0,
   7989         title = {Chainsaw: Eliminating Trees from Overlay Multicast},
   7990         booktitle = {IPTPS'05. Proceedings of the 4th International Workshop on Peer-to-Peer Systems},
   7991         series = {Lecture Notes in Computer Science (Peer-to-peer Systems IV)},
   7992         volume = {3640},
   7993         year = {2005},
   7994         month = nov,
   7995         pages = {127--140},
   7996         publisher = {Springer Berlin / Heidelberg},
   7997         organization = {Springer Berlin / Heidelberg},
   7998         address = {Ithaca, NY, USA},
   7999         abstract = {In this paper, we present Chainsaw, a p2p overlay multicast system that completely eliminates trees. Peers are notified of new packets by their neighbors and must explicitly request a packet from a neighbor in order to receive it. This way, duplicate data can be eliminated and a peer can ensure it receives all packets. We show with simulations that Chainsaw has a short startup time, good resilience to catastrophic failure and essentially no packet loss. We support this argument with real-world experiments on Planetlab and compare Chainsaw to Bullet and Splitstream using MACEDON},
   8000         www_section = {chainsaw, p2p overlay multicast system, packet loss, trees},
   8001         isbn = {978-3-540-29068-1},
   8002         issn = {1611-3349 (Online)},
   8003         doi = {10.1007/11558989},
   8004         url = {http://www.springerlink.com/content/l13550223q12l65v/about/},
   8005         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chainsaw.pdf},
   8006         author = {Vinay Pai and Kapil Kumar and Karthik Tamilmani and Vinay Sambamurthy and Alexander E. Mohr},
   8007         editor = {Miguel Castro and Robbert Van Renesse}
   8008 }
   8009 @booklet {guha05characterization,
   8010         title = {Characterization and measurement of TCP traversal through NATs and firewalls},
   8011         year = {2005},
   8012         abstract = {In recent years, the standards community has developed techniques for traversing NAT/firewall boxes with UDP (that is, establishing UDP flows between hosts behind NATs). Because of the asymmetric nature of TCP connection establishment, however, NAT traversal of TCP is more difficult. Researchers have recently proposed a variety of promising approaches for TCP NAT traversal. The success of these approaches, however, depends on how NAT boxes respond to various sequences of TCP (and ICMP) packets. This paper presents the first broad study of NAT behavior for a comprehensive set of TCP NAT traversal techniques over a wide range of commercial NAT products. We developed a publicly available software test suite that measures the NAT's responses both to a variety of isolated probes and to complete TCP connection establishments. We test sixteen NAT products in the lab, and 93 home NATs in the wild. Using these results, as well as market data for NAT products, we estimate the likelihood of successful NAT traversal for home networks. The insights gained from this paper can be used to guide both design of TCP NAT traversal protocols and the standardization of NAT/firewall behavior, including the IPv4-IPv6 translating NATs critical for IPv6 transition},
   8013         www_section = {firewall, NAT},
   8014         url = {http://portal.acm.org/citation.cfm?id=1251086.1251104},
   8015         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/imc05-tcpnat.pdf},
   8016         author = {Saikat Guha and Paul Francis}
   8017 }
   8018 @conference {chl05-full:eurocrypt2005,
   8019         title = {Compact E-Cash},
   8020         booktitle = {Proceedings of EUROCRYPT 2005},
   8021         series = {Lecture Notes in Computer Science},
   8022         volume = {3494},
   8023         year = {2005},
   8024         pages = {302--321},
   8025         publisher = {Springer},
   8026         organization = {Springer},
   8027         abstract = {This paper presents efficient off-line anonymous e-cash schemes where a user can withdraw a wallet containing 2^l coins each of which she can spend unlinkably. Our first result is a scheme, secure under the strong RSA and the y-DDHI assumptions, where the complexity of the withdrawal and spend operations is O(l+k) and the user's wallet can be stored using O(l+k) bits, where k is a security parameter. The best previously known schemes require at least one of these complexities to be O(2^l k). In fact, compared to previous e-cash schemes, our whole wallet of 2^l coins has about the same size as one coin in these schemes. Our scheme also offers exculpability of users, that is, the bank can prove to third parties that a user has double-spent.
   8028 
   8029 We then extend our scheme to our second result, the first e-cash scheme that provides traceable coins without a trusted third party. That is, once a user has double spent one of the 2^l coins in her wallet, all her spendings of these coins can be traced. We present two alternate constructions. One construction shares the same complexities with our first result but requires a strong bilinear map assumption that is only conjectured to hold on MNT curves. The second construction works on more general types of elliptic curves, but the price for this is that the complexity of the spending and of the withdrawal protocols becomes O(lk) and O(lk + k^2) bits, respectively, and wallets take O(lk) bits of storage. All our schemes are secure in the random oracle model},
   8030         isbn = {3-540-25910-4},
   8031         doi = {10.1007/b136415},
   8032         url = {http://www.springerlink.com/content/vwkgkfpdmrdky5a8/},
   8033         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chl05-full-eurocrypt2005.pdf},
   8034         author = {Jan Camenisch and Susan Hohenberger and Anna Lysyanskaya},
   8035         editor = {Ronald Cramer}
   8036 }
   8037 @conference {ih05-danezisclulow,
   8038         title = {Compulsion Resistant Anonymous Communications},
   8039         booktitle = {Proceedings of Information Hiding Workshop (IH 2005)},
   8040         year = {2005},
   8041         month = {June},
   8042         pages = {11--25},
   8043         publisher = {Springer Berlin / Heidelberg},
   8044         organization = {Springer Berlin / Heidelberg},
   8045         abstract = {We study the effect compulsion attacks, through which an adversary can request a decryption or key from an honest node, have on the security of mix based anonymous communication systems. Some specific countermeasures are proposed that increase the cost of compulsion attacks, detect that tracing is taking place and ultimately allow for some anonymity to be preserved even when all nodes are under compulsion. Going beyond the case when a single message is traced, we also analyze the effect of multiple messages being traced and devise some techniques that could retain some anonymity. Our analysis highlights that we can reason about plausible deniability in terms of the information theoretic anonymity metrics},
   8046         www_section = {countermeasure, mix},
   8047         isbn = {978-3-540-29039-1},
   8048         doi = {10.1007/11558859},
   8049         url = {http://www.springerlink.com/content/74461772r675l828/},
   8050         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-danezisclulow.pdf},
   8051         author = {George Danezis and Jolyon Clulow}
   8052 }
   8053 @article {2005_1,
   8054         title = {Cooperation among strangers with limited information about reputation},
   8055         journal = {Journal of Public Economics},
   8056         volume = {89},
   8057         year = {2005},
   8058         month = aug,
   8059         pages = {1457--1468},
   8060         abstract = {The amount of institutional intervention necessary to secure efficiency-enhancing cooperation in markets and organizations, in circumstances where interactions take place among essentially strangers, depends critically on the amount of information informal reputation mechanisms need to transmit. Models based on subgame perfection find that the information necessary to support cooperation is recursive in nature and thus information generating and processing requirements are quite demanding. Models that do not rely on subgame perfection, on the other hand, suggest that the information demands may be quite modest. The experiment we present indicates that even without any reputation information there is a non-negligible amount of cooperation that is, however, quite sensitive to the cooperation costs. For high costs, providing information about a partner's immediate past action increases cooperation. Recursive information about the partners' previous partners' reputation further promotes cooperation, regardless of the cooperation costs},
   8061         www_section = {cooperation, experimental economics, reputation},
   8062         doi = {10.1016/j.jpubeco.2004.03.008},
   8063         url = {http://dx.doi.org/10.1016/j.jpubeco.2004.03.008},
   8064         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Public\%20Economics\%20-\%20Bolton\%2C\%20Katok\%20\%26\%20Ockenfels.pdf},
   8065         author = {Gary E. Bolton and Elena Katok and Axel Ockenfels}
   8066 }
   8067 @conference {2005_2,
   8068         title = {Correctness of a gossip based membership protocol},
   8069         booktitle = {PODC'05},
   8070         year = {2005},
   8071         publisher = {ACM},
   8072         organization = {ACM},
   8073         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossip-podc05.pdf},
   8074         author = {Andre Allavena and Alan Demers and John E. Hopcroft}
   8075 }
   8076 @conference {Moore05counteringhidden-action,
   8077         title = {Countering Hidden-action Attacks on Networked Systems},
   8078         booktitle = {WEIS'05. Fourth Workshop on the Economics of Information Security},
   8079         year = {2005},
   8080         month = jun,
   8081         address = {Cambridge, England},
   8082         abstract = {We define an economic category of hidden-action attacks: actions made attractive by a lack of observation. We then consider its implications for computer systems. Rather than structure contracts to compensate for incentive problems, we rely on insights from social capital theory to design network topologies and interactions that undermine the potential for hidden-action attacks},
   8083         www_section = {asymmetric information, computer security, decentralized, economics, information security, moral hazard, social capital},
   8084         doi = {10.1.1.119.8132},
   8085         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WEIS\%2705\%20-\%20Moore\%20-\%20Counterin\%20hidden-action\%20attacks.pdf},
   8086         author = {Tyler Moore}
   8087 }
   8088 @conference {Massoulie:2005:CRS:1064212.1064215,
   8089         title = {Coupon replication systems},
   8090         booktitle = {SIGMETRICS'05. Proceedings of the 2005 ACM SIGMETRICS International Conference on Measurement and Modeling of Computer Systems},
   8091         series = {SIGMETRICS '05},
   8092         year = {2005},
   8093         month = jun,
   8094         pages = {2--13},
   8095         publisher = {ACM},
   8096         organization = {ACM},
   8097         address = {Banff, Alberta, Canada},
   8098         abstract = {Motivated by the study of peer-to-peer file swarming systems {\`a} la BitTorrent, we introduce a probabilistic model of coupon replication systems. These systems consist of users, aiming to complete a collection of distinct coupons. Users are characterised by their current collection of coupons, and leave the system once they complete their coupon collection. The system evolution is then specified by describing how users of distinct types meet, and which coupons get replicated upon such encounters. For open systems, with exogenous user arrivals, we derive necessary and sufficient stability conditions in a layered scenario, where encounters are between users holding the same number of coupons. We also consider a system where encounters are between users chosen uniformly at random from the whole population. We show that performance, captured by sojourn time, is asymptotically optimal in both systems as the number of coupon types becomes large. We also consider closed systems with no exogenous user arrivals. In a special scenario where users have only one missing coupon, we evaluate the size of the population ultimately remaining in the system, as the initial number of users, N, goes to infinity. We show that this decreases geometrically with the number of coupons, K. In particular, when the ratio K/log(N) is above a critical threshold, we prove that this number of left-overs is of order log(log(N)). These results suggest that performance of file swarming systems does not depend critically on either altruistic user behavior, or on load balancing strategies such as rarest first},
   8099         www_section = {content distribution, file swarming, peer-to-peer networking},
   8100         isbn = {1-59593-022-1},
   8101         doi = {http://doi.acm.org/10.1145/1064212.1064215},
   8102         url = {http://doi.acm.org/10.1145/1064212.1064215},
   8103         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGMETRICS\%2705\%20-\%20Coupon\%20replication\%20systems.pdf},
   8104         author = {Massouli{\'e}, Laurent and Vojnovi{\'c}, Milan}
   8105 }
   8106 @conference {Kostoulas:2005:DSS:1097873.1098292,
   8107         title = {Decentralized Schemes for Size Estimation in Large and Dynamic Groups},
   8108         booktitle = {NCA'05--Proceedings of the 4th IEEE International Symposium on Network Computing and Applications},
   8109         year = {2005},
   8110         month = jul,
   8111         pages = {41--48},
   8112         publisher = {IEEE Computer Society},
   8113         organization = {IEEE Computer Society},
   8114         address = {Cambridge, MA, USA},
   8115         abstract = {Large-scale and dynamically changing distributed systems such as the Grid, peer-to-peer overlays, etc., need to collect several kinds of global statistics in a decentralized manner. In this paper, we tackle a specific statistic collection problem called Group Size Estimation, for estimating the number of non-faulty processes present in the global group at any given point of time. We present two new decentralized algorithms for estimation in dynamic groups, analyze the algorithms, and experimentally evaluate them using real-life traces. One scheme is active: it spreads a gossip into the overlay first, and then samples the receipt times of this gossip at different processes. The second scheme is passive: it measures the density of processes when their identifiers are hashed into a real interval. Both schemes have low latency, scalable per-process overheads, and provide high levels of probabilistic accuracy for the estimate. They are implemented as part of a size estimation utility called PeerCounter that can be incorporated modularly into standard peer-to-peer overlays. We present experimental results from both the simulations and PeerCounter, running on a cluster of 33 Linux servers},
   8116         www_section = {decentralized, distributed systems, network size estimation},
   8117         isbn = {0-7695-2326-9},
   8118         doi = {10.1109/NCA.2005.15},
   8119         url = {http://dl.acm.org/citation.cfm?id=1097873.1098292},
   8120         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NCA\%2705\%20-\%20Decentralized\%20Schemes\%20for\%20Size\%20Estimation\%20in\%20Large\%20and\%20Dynamic\%20Groups.pdf},
   8121         author = {Kostoulas, Dionysios and Psaltoulis, Dimitrios and Indranil Gupta and Kenneth P. Birman and Alan Demers}
   8122 }
   8123 @conference {You05deepstore:,
   8124         title = {Deep Store: An archival storage system architecture},
   8125         booktitle = {Proceedings of the 21st International Conference on Data Engineering (ICDE'05)},
   8126         year = {2005},
   8127         pages = {804--815},
   8128         publisher = {IEEE},
   8129         organization = {IEEE},
   8130         abstract = {We present the Deep Store archival storage architecture, a large-scale storage system that stores immutable data efficiently and reliably for long periods of time. Archived data is stored across a cluster of nodes and recorded to hard disk. The design differentiates itself from traditional file systems by eliminating redundancy within and across files, distributing content for scalability, associating rich metadata with content, and using variable levels of replication based on the importance or degree of dependency of each piece of stored data. We evaluate the foundations of our design, including PRESIDIO, a virtual content-addressable storage framework with multiple methods for inter-file and intra-file compression that effectively addresses the data-dependent variability of data compression. We measure content and metadata storage efficiency, demonstrate the need for a variable-degree replication model, and provide preliminary results for storage performance},
   8131         www_section = {storage},
   8132         isbn = {0-7695-2285-8},
   8133         doi = {10.1109/ICDE.2005.47},
   8134         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.66.6928},
   8135         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.76.5241.pdf},
   8136         author = {Lawrence L. You and Kristal T. Pollack and Darrell D. E. Long}
   8137 }
   8138 @conference {Electrical04designingincentives,
   8139         title = {Designing Incentives for Peer-to-Peer Routing},
   8140         booktitle = {INFOCOM 2005, 24th Annual Joint Conference of the IEEE Computer and Communications Societies},
   8141         volume = {1},
   8142         year = {2005},
   8143         month = mar,
   8144         pages = {374--385},
   8145         publisher = {IEEE Computer Society},
   8146         organization = {IEEE Computer Society},
   8147         address = {Miami, FL, USA},
   8148         abstract = {In a peer-to-peer network, nodes are typically required to route packets for each other. This leads to a problem of "free-loaders", nodes that use the network but refuse to route other nodes' packets. In this paper we study ways of designing incentives to discourage free-loading. We model the interactions between nodes as a "random matching game", and describe a simple reputation system that provides incentives for good behavior. Under certain assumptions, we obtain a stable subgame-perfect equilibrium. We use simulations to investigate the robustness of this scheme in the presence of noise and malicious nodes, and we examine some of the design trade-offs. We also evaluate some possible adversarial strategies, and discuss how our results might apply to real peer-to-peer systems},
   8149         www_section = {economics, free-loader, free-loading, peer-to-peer networking, system design},
   8150         issn = {0743-166X},
   8151         isbn = {0-7803-8968-9},
   8152         doi = {10.1109/INFCOM.2005.1497907},
   8153         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Designing\%20incentives\%20for\%20peer-to-peer\%20routing.pdf},
   8154         author = {Alberto Blanc and Yi-Kai Liu and Vahdat, Amin}
   8155 }
   8156 @conference {1251207,
   8157         title = {Detecting BGP configuration faults with static analysis},
   8158         booktitle = {NSDI'05: Proceedings of the 2nd conference on Symposium on Networked Systems Design \& Implementation},
   8159         year = {2005},
   8160         pages = {43--56},
   8161         publisher = {USENIX Association},
   8162         organization = {USENIX Association},
   8163         address = {Berkeley, CA, USA},
   8164         abstract = {The Internet is composed of many independent autonomous systems (ASes) that exchange reachability information to destinations using the Border Gateway Protocol (BGP). Network operators in each AS configure BGP routers to control the routes that are learned, selected, and announced to other routers. Faults in BGP configuration can cause forwarding loops, packet loss, and unintended paths between hosts, each of which constitutes a failure of the Internet routing infrastructure.
   8165 
   8166 This paper describes the design and implementation of rcc, the router configuration checker, a tool that finds faults in BGP configurations using static analysis. rcc detects faults by checking constraints that are based on a high-level correctness specification. rcc detects two broad classes of faults: route validity faults, where routers may learn routes that do not correspond to usable paths, and path visibility faults, where routers may fail to learn routes for paths that exist in the network. rcc enables network operators to test and debug configurations before deploying them in an operational network, improving on the status quo where most faults are detected only during operation. rcc has been downloaded by more than sixty-five network operators to date, some of whom have shared their configurations with us. We analyze network-wide configurations from 17 different ASes to detect a wide variety of faults and use these findings to motivate improvements to the Internet routing infrastructure},
   8167         www_section = {autonomous systems, border gateway protocol},
   8168         url = {http://portal.acm.org/citation.cfm?id=1251207$\#$},
   8169         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.113.5668.pdf},
   8170         author = {Nick Feamster and Hari Balakrishnan}
   8171 }
   8172 @conference {Khorshadi:2005:DPR:1090948.1091369,
   8173         title = {Determining the Peer Resource Contributions in a P2P Contract},
   8174         booktitle = {HOT-P2P 2005. Proceedings of the Second International Workshop on Hot Topics in Peer-to-Peer Systems},
   8175         year = {2005},
   8176         month = jul,
   8177         pages = {2--9},
   8178         publisher = {IEEE Computer Society},
   8179         organization = {IEEE Computer Society},
   8180         address = {La Jolla, California, USA},
   8181         abstract = {In this paper we study a scheme called P2P contract which explicitly specifies the resource contributions that are required from the peers. In particular, we consider a P2P file sharing system in which when a peer downloads the file it is required to serve the file to up to N other peers within a maximum period of time T. We study the behavior of this contribution scheme in both centralized and decentralized P2P networks. In a centralized architecture, new requests are forwarded to a central server which hands out the contract along with a list of peers from where the file can be downloaded. We show that a simple fixed contract (i.e., fixed values of N and T) is sufficient to create the required server capacity which adapts to the load. Furthermore, we show that T, the time part of the contract is a more important control parameter than N. In the case of a decentralized P2P architecture, each new request is broadcast to a certain neighborhood determined by the time-to-live (TTL) parameter. Each server receiving the request independently doles out a contract and the requesting peer chooses the one which is least constraining. If there are no servers in the neighborhood, the request fails. To achieve a good request success ratio, we propose an adaptive scheme to set the contracts without requiring global information. Through both analysis and simulation, we show that the proposed scheme adapts to the load and achieves low request failure rate with high server efficiency},
   8182         www_section = {contracts, P2P, peer resource contribution, peer-to-peer networking},
   8183         isbn = {0-7695-2417-6},
   8184         doi = {10.1109/HOT-P2P.2005.9},
   8185         url = {http://dl.acm.org/citation.cfm?id=1090948.1091369},
   8186         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HOT-P2P\%2705\%20-\%20Khorshadi\%2C\%20Liu\%20\%26\%20Ghosal.pdf},
   8187         author = {Khorshadi, Behrooz and Liu, Xin and Dipak Ghosal}
   8188 }
   8189 @book {2005_3,
   8190         title = {Distributed Hash Tables},
   8191         booktitle = {Peer-to-Peer Systems and Applications},
   8192         series = {Lecture Notes in Computer Science},
   8193         volume = {3485},
   8194         year = {2005},
   8195         publisher = {Springer},
   8196         organization = {Springer},
   8197         chapter = {7},
   8198         abstract = {In the last few years, an increasing number of massively distributed systems with millions of participants has emerged within very short time frames. Applications, such as instant messaging, file-sharing, and content distribution have attracted countless numbers of users. For example, Skype gained more than 2.5 million users within twelve months, and more than 50\% of Internet traffic is originated by BitTorrent. These very large and still rapidly growing systems attest to a new era for the design and deployment of distributed systems. In particular, they reflect what the major challenges are today for designing and implementing distributed systems: scalability, flexibility, and instant deployment},
   8199         www_section = {distributed hash table},
   8200         doi = {10.1007/11530657_7},
   8201         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LNCS\%20-\%20Distributed\%20Hash\%20Tables.pdf},
   8202         author = {Klaus Wehrle and G{\"o}tz, Stefan and Rieche, Simon}
   8203 }
   8204 @conference {2005_4,
   8205         title = {An empirical study of free-riding behavior in the maze p2p file-sharing system},
   8206         booktitle = {IPTPS'05. Proceedings of the 4th International Workshop on Peer-to-Peer Systems},
   8207         year = {2005},
   8208         publisher = {Springer-Verlag},
   8209         organization = {Springer-Verlag},
   8210         address = {Berlin, Heidelberg},
   8211         www_section = {free-riding, incentives, Sybil attack},
   8212         isbn = {3-540-29068-0, 978-3-540-29068-1},
   8213         doi = {10.1007/11558989_17},
   8214         url = {http://dx.doi.org/10.1007/11558989_17},
   8215         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/maze_freeride.pdf},
   8216         author = {Yang, Mao and Zhang, Zheng and Li, Xiaoming and Dai, Yafei}
   8217 }
   8218 @techreport {Bickson05theemule,
   8219         title = {The eMule Protocol Specification},
   8220         number = {TR-2005-03},
   8221         year = {2005},
   8222         month = jan,
   8223         institution = {Leibniz Center, School of Computer Science and Engineering, The Hebrew University},
   8224         type = {Tech report},
   8225         address = {Jerusalem, Israel},
   8228         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.7750},
   8229         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.7750_0.pdf},
   8230         author = {Yoram Kulbak and Danny Bickson}
   8231 }
   8232 @conference {Wang05erasure-codingbased,
   8233         title = {Erasure-coding based routing for opportunistic networks},
   8234         booktitle = {Proceedings of the 2005 ACM SIGCOMM Workshop on Delay-Tolerant Networking (WDTN '05)},
   8235         year = {2005},
   8236         pages = {229--236},
   8237         publisher = {ACM Press},
   8238         organization = {ACM Press},
   8239         abstract = {Routing in a delay tolerant network (DTN) with unpredictable node mobility is a challenging problem because disconnections are prevalent and lack of knowledge about network dynamics hinders good decision making. Current approaches are primarily based on redundant transmissions. They have either high overhead due to excessive transmissions or long delays due to the possibility of making wrong choices when forwarding a few redundant copies. In this paper, we propose a novel forwarding algorithm based on the idea of erasure codes. Erasure coding allows use of a large number of relays while maintaining a constant overhead, which results in fewer cases of long delays. We use simulation to compare the routing performance of using erasure codes in DTN with four other categories of forwarding algorithms proposed in the literature. Our simulations are based on a real-world mobility trace collected in a large outdoor wild-life environment. The results show that the erasure-coding based algorithm provides the best worst-case delay performance with a fixed amount of overhead. We also present a simple analytical model to capture the delay characteristics of erasure-coding based forwarding, which provides insights on the potential of our approach},
   8240         www_section = {delay tolerant network, routing},
   8241         doi = {10.1145/1080139.1080140},
   8242         url = {http://doi.acm.org/10.1145/1080139.1080140},
   8243         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.79.364.pdf},
   8244         author = {Wang, Yong and Sushant Jain and Martonosi, Margaret and Fall, Kevin}
   8245 }
   8246 @conference {2005_5,
   8247         title = {Exchange-based incentive mechanisms for peer-to-peer file sharing},
   8248         booktitle = {Proceedings of International Conference on Distributed Computing Systems 2004},
   8249         year = {2005},
   8250         month = mar,
   8251         pages = {524--533},
   8252         publisher = {IEEE Computer Society},
   8253         organization = {IEEE Computer Society},
   8254         address = {Tokyo, Japan},
   8255         abstract = {Performance of peer-to-peer resource sharing networks depends upon the level of cooperation of the participants. To date, cash-based systems have seemed too complex, while lighter-weight credit mechanisms have not provided strong incentives for cooperation. We propose exchange-based mechanisms that provide incentives for cooperation in peer-to-peer file sharing networks. Peers give higher service priority to requests from peers that can provide a simultaneous and symmetric service in return. We generalize this approach to n-way exchanges among rings of peers and present a search algorithm for locating such rings. We have used simulation to analyze the effect of exchanges on performance. Our results show that exchange-based mechanisms can provide strong incentives for sharing, offering significant improvements in service times for sharing users compared to free-riders, without the problems and complexity of cash- or credit-based systems},
   8256         www_section = {exchange-based mechanism, peer-to-peer networking, sharing},
   8257         isbn = {0-7695-2086-3},
   8258         doi = {10.1109/ICDCS.2004.1281619},
   8259         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2704.pdf},
   8260         author = {Kostas G. Anagnostakis and Michael B. Greenwald}
   8261 }
   8262 @conference {conf/infocom/GollapudiSZ05,
   8263         title = {Exploiting anarchy in networks: a game-theoretic approach to combining fairness and throughput},
   8264         booktitle = {INFOCOM 2005. Proceedings of the 24th Annual Joint Conference of the IEEE Computer and Communications Societies},
   8265         year = {2005},
   8266         month = mar,
   8267         pages = {2147--2158},
   8268         publisher = {IEEE Computer Society},
   8269         organization = {IEEE Computer Society},
   8270         address = {Miami, FL, USA},
   8271         abstract = {We propose a novel mechanism for routing and bandwidth allocation that exploits the selfish and rational behavior of flows in a network. Our mechanism leads to allocations that simultaneously optimize throughput and fairness criteria. We analyze the performance of our mechanism in terms of the induced Nash equilibrium. We compare the allocations at the Nash equilibrium with throughput-optimal allocations as well as with fairness-optimal allocations. Our mechanism offers a smooth trade-off between these criteria, and allows us to produce allocations that are approximately optimal with respect to both. Our mechanism is also fairly simple and admits an efficient distributed implementation},
   8272         www_section = {bandwidth allocation, nash equilibrium, routing allocation},
   8273         isbn = {0-7803-8968-9},
   8274         doi = {10.1109/INFCOM.2005.1498490},
   8275         url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2005.html$\#$GollapudiSZ05},
   8276         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Exploiting\%20anarchy\%20in\%20networks.pdf},
   8277         author = {Gollapudi, Sreenivas and Sivakumar, D. and Zhang, Aidong}
   8278 }
   8279 @article {10.1109/MOBIQUITOUS.2005.29,
   8280         title = {Exploiting co-location history for efficient service selection in ubiquitous computing systems},
   8281         journal = {Mobile and Ubiquitous Systems, Annual International Conference on},
   8282         year = {2005},
   8283         pages = {202--212},
   8284         publisher = {IEEE Computer Society},
   8285         address = {Los Alamitos, CA, USA},
   8286         abstract = {As the ubiquitous computing vision materializes, the number and diversity of digital elements in our environment increases. Computing capability comes in various forms and is embedded in different physical objects, ranging from miniature devices such as human implants and tiny sensor particles, to large constructions such as vehicles and entire buildings. The number of possible interactions among such elements, some of which may be invisible or offer similar functionality, is growing fast so that it becomes increasingly hard to combine or select between them. Mechanisms are thus required for intelligent matchmaking that will achieve controlled system behavior, yet without requiring the user to continuously input desirable options in an explicit manner. In this paper we argue that information about the colocation relationship of computing elements is quite valuable in this respect and can be exploited to guide automated service selection with minimal or no user involvement. We also discuss the implementation of such mechanism that is part of our runtime system for smart objects},
   8287         isbn = {0-7695-2375-7},
   8288         doi = {10.1109/MOBIQUITOUS.2005.29},
   8289         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/MOBIQUITOUS.2005.29},
   8290         author = {Alexandros Karypidis and Spyros Lalis}
   8291 }
   8292 @article {10.1109/MASCOT.2005.73,
   8293         title = {The Feasibility of DHT-based Streaming Multicast},
   8294         journal = {Proceedings of the 13th IEEE International Symposium on Modeling, Analysis, and Simulation of Computer and Telecommunication Systems (MASCOTS 2005)},
   8295         year = {2005},
   8296         pages = {288--298},
   8297         publisher = {IEEE Computer Society},
   8298         address = {Los Alamitos, CA, USA},
   8299         issn = {1526-7539},
   8300         doi = {http://doi.ieeecomputersociety.org/10.1109/MASCOT.2005.73},
   8301         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SBirrer-dhtBasedMulticast_0.pdf},
   8302         author = {Stefan Birrer and Fabian E. Bustamante}
   8303 }
   8304 @conference {Wang05findingcollisions,
   8305         title = {Finding Collisions in the Full SHA-1},
   8306         booktitle = {Proceedings of CRYPTO 2005},
   8307         year = {2005},
   8308         pages = {17--36},
   8309         publisher = {Springer},
   8310         organization = {Springer},
   8311         abstract = {In this paper, we present new collision search attacks on the hash function SHA-1. We show that collisions of SHA-1 can be found with complexity less than 2^69 hash operations. This is the first attack on the full 80-step SHA-1 with complexity less than the 2^80 theoretical bound},
   8312         www_section = {cryptography},
   8313         isbn = {978-3-540-28114-6},
   8314         doi = {10.1007/11535218},
   8315         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.94.4261},
   8316         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SHA1AttackProceedingVersion.pdf},
   8317         author = {Xiaoyun Wang and Yiqun Lisa Yin and Hongbo Yu}
   8318 }
   8319 @book {DBLP:conf/p2p/EberspacherS05a,
   8320         title = {First and Second Generation of Peer-to-Peer Systems},
   8321         booktitle = {Peer-to-Peer Systems and Applications},
   8322         series = {Lecture Notes in Computer Science},
   8323         volume = {3485},
   8324         year = {2005},
   8325         pages = {35--56},
   8326         publisher = {Springer Berlin / Heidelberg},
   8327         organization = {Springer Berlin / Heidelberg},
   8328         abstract = {Peer-to-Peer (P2P) networks appeared roughly around the year 2000 when a broadband Internet infrastructure (even at the network edge) became widely available. Unlike traditional networks, Peer-to-Peer networks do not rely on a specific infrastructure offering transport services. Instead they form {\textquotedblleft}overlay structures{\textquotedblright} focusing on content allocation and distribution based on TCP or HTTP connections. Whereas in a standard Client-Server configuration content is stored and provided only via some central server(s), Peer-to-Peer networks are highly decentralized and locate a desired content at some participating peer and provide the corresponding IP address of that peer to the searching peer. The download of that content is then initiated using a separate connection, often using HTTP. Thus, the high load usually resulting for a central server and its surrounding network is avoided leading to a more even distribution of load on the underlying physical network. On the other hand, such networks are typically subject to frequent changes because peers join and leave the network without any central control},
   8329         www_section = {generation, P2P, peer-to-peer networking},
   8330         author = {J{\"o}rg Ebersp{\"a}cher and R{\"u}diger Schollmeier}
   8331 }
   8332 @conference {rhea2005fixing,
   8333         title = {Fixing the embarrassing slowness of OpenDHT on PlanetLab},
   8334         booktitle = {Proc. of the Second USENIX Workshop on Real, Large Distributed Systems},
   8335         year = {2005},
   8336         pages = {25--30},
   8337         www_section = {distributed hash table, openDHT, peer-to-peer, PlanetLab},
   8338         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/opendht-fixing.pdf},
   8339         author = {Sean Rhea and Byung-Gon Chun and John Kubiatowicz and Scott Shenker}
   8340 }
   8341 @conference {Fu::FlowMarking::2005,
   8342         title = {On Flow Marking Attacks in Wireless Anonymous Communication Networks},
   8343         booktitle = {Proceedings of the IEEE International Conference on Distributed Computing Systems (ICDCS)},
   8344         year = {2005},
   8345         month = {April},
   8346         publisher = {IEEE Computer Society, Washington, DC, USA},
   8347         organization = {IEEE Computer Society, Washington, DC, USA},
   8348         abstract = {This paper studies the degradation of anonymity in a flow-based wireless mix network under flow marking attacks, in which an adversary embeds a recognizable pattern of marks into wireless traffic flows by electromagnetic interference. We find that traditional mix technologies are not effective in defeating flow marking attacks, and it may take an adversary only a few seconds to recognize the communication relationship between hosts by tracking such artificial marks. Flow marking attacks utilize frequency domain analytical techniques and convert time domain marks into invariant feature frequencies. To counter flow marking attacks, we propose a new countermeasure based on digital filtering technology, and show that this filter-based counter-measure can effectively defend a wireless mix network from flow marking attacks},
   8349         www_section = {802.11, anonymity, Bluetooth, flow marking attack},
   8350         isbn = {0-7695-2331-5},
   8351         url = {http://portal.acm.org/citation.cfm?id=1069397},
   8352         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fu--FlowMarking--2005.pdf},
   8353         author = {Xinwen Fu and Ye Zhu and Bryan Graham and Riccardo Bettati and Wei Zhao}
   8354 }
   8355 @conference {camlys05,
   8356         title = {A Formal Treatment of Onion Routing},
   8357         booktitle = {Proceedings of CRYPTO 2005},
   8358         year = {2005},
   8359         month = {August},
   8360         pages = {169--187},
   8361         publisher = {Springer-Verlag, LNCS 3621},
   8362         organization = {Springer-Verlag, LNCS 3621},
   8363         abstract = {Anonymous channels are necessary for a multitude of privacy-protecting protocols. Onion routing is probably the best known way to achieve anonymity in practice. However, the cryptographic aspects of onion routing have not been sufficiently explored: no satisfactory definitions of security have been given, and existing constructions have only had ad-hoc security analysis for the most part.
   8364 We provide a formal definition of onion-routing in the universally composable framework, and also discover a simpler definition (similar to CCA2 security for encryption) that implies security in the UC framework. We then exhibit an efficient and easy to implement construction of an onion routing scheme satisfying this definition},
   8365         www_section = {onion routing, privacy},
   8366         isbn = {978-3-540-28114-6},
   8367         doi = {10.1007/11535218},
   8368         url = {http://www.springerlink.com/content/0jmg1krt9ph147ql/},
   8369         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camlys05.pdf},
   8370         author = {Jan Camenisch and Anna Lysyanskaya},
   8371         editor = {Victor Shoup}
   8372 }
   8373 @article {10.1109/MDSO.2005.31,
   8374         title = {Free Riding on Gnutella Revisited: The Bell Tolls?},
   8375         journal = {IEEE Distributed Systems Online},
   8376         volume = {6},
   8377         year = {2005},
   8378         month = jun,
   8379         publisher = {IEEE Computer Society},
   8380         chapter = {1},
   8381         address = {Los Alamitos, CA, USA},
   8382         abstract = {Individuals who use peer-to-peer (P2P) file-sharing networks such as Gnutella face a social dilemma. They must decide whether to contribute to the common good by sharing files or to maximize their personal experience by free riding, downloading files while not contributing any to the network. Individuals gain no personal benefits from uploading files (in fact, it's inconvenient), so it's "rational" for users to free ride. However, significant numbers of free riders degrade the entire system's utility, creating a "tragedy of the digital commons." In this article, a new analysis of free riding on the Gnutella network updates data from 2000 and points to an increasing downgrade in the network's overall performance and the emergence of a "metatragedy" of the commons among Gnutella developers},
   8383         www_section = {distributed systems, free riding, Gnutella, peer-to-peer networking},
   8384         issn = {1541-4922},
   8385         doi = {http://doi.ieeecomputersociety.org/10.1109/MDSO.2005.31},
   8386         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20DSO\%20-\%20Free\%20riding\%20on\%20Gnutella\%20revisited.pdf},
   8387         author = {Daniel Hughes and Geoff Coulson and James Walkerdine}
   8388 }
   8389 @conference {2005_6,
   8390         title = {Fuzzy Identity-Based Encryption},
   8391         booktitle = {EUROCRYPT'05 Workshop on the Theory and Applications of Cryptographic Techniques},
   8392         series = {Lecture Notes in Computer Science},
   8393         volume = {3494},
   8394         year = {2005},
   8395         month = may,
   8396         publisher = {Springer},
   8397         organization = {Springer},
   8398         address = {Aarhus, Denmark},
   8399         abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as a set of descriptive attributes. A Fuzzy IBE scheme allows for a private key for an identity, ω, to decrypt a ciphertext encrypted with an identity, ω', if and only if the identities ω and ω' are close to each other as measured by the {\textquotedblleft}set overlap{\textquotedblright} distance metric. A Fuzzy IBE scheme can be applied to enable encryption
   8400 using biometric inputs as identities; the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for the use of biometric identities, which inherently will have some noise each time they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of application that we term {\textquotedblleft}attribute-based encryption{\textquotedblright}.
   8401 In this paper we present two constructions of Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based Encryption of a message under several attributes that compose a (fuzzy) identity. Our IBE schemes are both error-tolerant and secure against collusion attacks. Additionally, our basic construction does not use random oracles. We prove the security of our schemes under the Selective-ID security model},
   8402         www_section = {Fuzzy IBE, IBE},
   8403         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2705\%20-\%20Fuzzy\%20Identity-Based\%20Encryption.pdf},
   8404         author = {Amit Sahai and Waters, Brent}
   8405 }
   8406 @article {Jelasity:2005:GAL:1082469.1082470,
   8407         title = {Gossip-based aggregation in large dynamic networks},
   8408         journal = {ACM Transactions on Computer Systems},
   8409         volume = {23},
   8410         year = {2005},
   8411         month = {August},
   8412         pages = {219--252},
   8413         publisher = {ACM},
   8414         address = {New York, NY, USA},
   8415         abstract = {As computer networks increase in size, become more heterogeneous and span greater geographic distances, applications must be designed to cope with the very large scale, poor reliability, and often, with the extreme dynamism of the underlying network. Aggregation is a key functional building block for such applications: it refers to a set of functions that provide components of a distributed system access to global information including network size, average load, average uptime, location and description of hotspots, and so on. Local access to global information is often very useful, if not indispensable for building applications that are robust and adaptive. For example, in an industrial control application, some aggregate value reaching a threshold may trigger the execution of certain actions; a distributed storage system will want to know the total available free space; load-balancing protocols may benefit from knowing the target average load so as to minimize the load they transfer. We propose a gossip-based protocol for computing aggregate values over network components in a fully decentralized fashion. The class of aggregate functions we can compute is very broad and includes many useful special cases such as counting, averages, sums, products, and extremal values. The protocol is suitable for extremely large and highly dynamic systems due to its proactive structure---all nodes receive the aggregate value continuously, thus being able to track any changes in the system. The protocol is also extremely lightweight, making it suitable for many distributed applications including peer-to-peer and grid computing systems. We demonstrate the efficiency and robustness of our gossip-based protocol both theoretically and experimentally under a variety of scenarios including node and communication failures},
   8416         www_section = {Gossip-based protocols, proactive aggregation},
   8417         issn = {0734-2071},
   8418         doi = {http://doi.acm.org/10.1145/1082469.1082470},
   8419         url = {http://doi.acm.org/10.1145/1082469.1082470},
   8420         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jelasity\%2C\%20Montresor\%20\%26\%20Babaoglu\%20-\%20Gossip-based\%20aggregation.pdf},
   8421         author = {M{\'a}rk Jelasity and Alberto Montresor and Babaoglu, Ozalp}
   8422 }
   8423 @conference {Godfrey05heterogeneityand,
   8424         title = {Heterogeneity and Load Balance in Distributed Hash Tables},
   8425         booktitle = {Proceedings of IEEE INFOCOM},
   8426         year = {2005},
   8427         abstract = {Existing solutions to balance load in DHTs incur a high overhead either in terms of routing state or in terms of load movement generated by nodes arriving or departing the system. In this paper, we propose a set of general techniques and use them to develop a protocol based on Chord, called Y0, that achieves load balancing with minimal overhead under the typical assumption that the load is uniformly distributed in the identifier space. In particular, we prove that Y0 can achieve near-optimal load balancing, while moving little load to maintain the balance and increasing the size of the routing tables by at most a constant factor},
   8428         www_section = {Chord, distributed hash table, load balancing},
   8429         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.6740},
   8430         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.pdf},
   8431         author = {Godfrey, Brighten and Ion Stoica}
   8432 }
   8433 @conference {Feldman:2005:HMR:1064009.1064022,
   8434         title = {Hidden-action in multi-hop routing},
   8435         booktitle = {EC'05. Proceedings of the 6th ACM Conference on Electronic Commerce},
   8436         series = {EC '05},
   8437         year = {2005},
   8438         month = jun,
   8439         pages = {117--126},
   8440         publisher = {ACM},
   8441         organization = {ACM},
   8442         address = {Vancouver, Canada},
   8443         abstract = {In multi-hop networks, the actions taken by individual intermediate nodes are typically hidden from the communicating endpoints; all the endpoints can observe is whether or not the end-to-end transmission was successful. Therefore, in the absence of incentives to the contrary, rational (i.e., selfish) intermediate nodes may choose to forward packets at a low priority or simply not forward packets at all. Using a principal-agent model, we show how the hidden-action problem can be overcome through appropriate design of contracts, in both the direct (the endpoints contract with each individual router) and recursive (each router contracts with the next downstream router) cases. We further demonstrate that per-hop monitoring does not necessarily improve the utility of the principal or the social welfare in the system. In addition, we generalize existing mechanisms that deal with hidden-information to handle scenarios involving both hidden-information and hidden-action},
   8444         www_section = {contracts, hidden-action, incentives, mechanism design, moral-hazard, multi-hop, principal-agent model, routing},
   8445         isbn = {1-59593-049-3},
   8446         doi = {http://doi.acm.org/10.1145/1064009.1064022},
   8447         url = {http://doi.acm.org/10.1145/1064009.1064022},
   8448         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2705\%20-\%20Hidden-action\%20in\%20multi-hop\%20routing.pdf},
   8449         author = {Michal Feldman and John Chuang and Ion Stoica and Scott Shenker}
   8450 }
   8451 @conference {IPTPS05,
   8452         title = {High Availability in DHTs: Erasure Coding vs. Replication},
   8453         booktitle = {IPTPS'05--Proceedings of the 4th International Workshop on Peer-to-Peer Systems},
   8454         series = {Lecture Notes in Computer Science},
   8455         volume = {3640},
   8456         year = {2005},
   8457         month = feb,
   8458         publisher = {Springer},
   8459         organization = {Springer},
   8460         address = {Ithaca, New York},
   8461         abstract = {High availability in peer-to-peer DHTs requires data redundancy. This paper compares two popular redundancy schemes: replication and erasure coding. Unlike previous comparisons, we take the characteristics of the nodes that comprise the overlay into account, and conclude that in some cases the benefits from coding are limited, and may not be worth its disadvantages},
   8462         www_section = {distributed hash table, erasure coding, high availability, peer-to-peer networking, redundancy, Replication},
   8463         doi = {10.1007/11558989_21},
   8464         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2705\%20-\%20High\%20availability\%20in\%20DHTs\%3A\%20erasure\%20coding\%20vs.\%20replication.pdf},
   8465         author = {Rodrigues, Rodrigo and Barbara Liskov}
   8466 }
   8467 @conference {Acedanski05howgood,
   8468         title = {How good is random linear coding based distributed networked storage?},
   8469         booktitle = {NetCod'05--First Workshop on Network Coding, Theory, and Applications},
   8470         year = {2005},
   8471         month = apr,
   8472         publisher = {Citeseer},
   8473         organization = {Citeseer},
   8474         address = {Riva del Garda, Italy},
   8475         abstract = {We consider the problem of storing a large file or multiple large files in a distributed manner over a network. In the framework we consider, there are multiple storage locations, each of which only have very limited storage space for each file. Each storage location chooses a part (or a coded version of the parts) of the file without the knowledge of what is stored in the other locations. We want a file-downloader to connect to as few storage locations as possible and retrieve the entire file. We compare the performance of three strategies: uncoded storage, traditional erasure coding based storage, random linear coding based storage motivated by network coding. We demonstrate that, in principle, a traditional erasure coding based storage (eg: Reed-Solomon Codes) strategy can almost do as well as one can ask for with appropriate choice of parameters. However, the cost is a large amount of additional storage space required at the centralized server before distribution among multiple locations. The random linear coding based strategy performs as well without suffering from any such disadvantage. Further, with a probability close to one, the minimum number of storage location a downloader needs to connect to (for reconstructing the entire file), can be very close to the case where there is complete coordination between the storage locations and the downloader. We also argue that an uncoded strategy performs poorly},
   8476         www_section = {distributed networked storage, limited storage, linear coding, multiple storage locations},
   8477         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetCod\%2705\%20-\%20Random\%20linear\%20coding\%20based\%20distributed\%20networked\%20storage.pdf},
   8478         author = {Szymon Aceda{\'n}ski and Supratim Deb and Muriel M{\'e}dard and Ralf Koetter}
   8479 }
   8480 @article {Zoels05thehybrid,
   8481         title = {The Hybrid Chord Protocol: A Peer-to-peer Lookup Service for Context-Aware Mobile Applications},
   8482         journal = {IEEE ICN, Reunion Island, April 2005. LNCS 3421},
   8483         year = {2005},
   8484         abstract = {A fundamental problem in Peer-to-Peer (P2P) overlay networks is how to efficiently find a node that shares a requested object. The Chord protocol is a distributed lookup protocol addressing this problem using hash keys to identify the nodes in the network and also the shared objects. However, when a node joins or leaves the Chord ring, object references have to be rearranged in order to maintain the hash key mapping rules. This leads to a heavy traffic load, especially when nodes stay in the Chord ring only for a short time. In mobile scenarios storage capacity, transmission data rate and battery power are limited resources, so the heavy traffic load generated by the shifting of object references can lead to severe problems when using Chord in a mobile scenario. In this paper, we present the Hybrid Chord Protocol (HCP). HCP solves the problem of frequent joins and leaves of nodes. As a further improvement of an efficient search, HCP supports the grouping of shared objects in interest groups. Our concept of using information profiles to describe shared objects allows defining special interest groups (context spaces) and a shared object to be available in multiple context spaces},
   8485         www_section = {Chord, Hybrid Chord Protocol, P2P},
   8486         isbn = {978-3-540-25338-9},
   8487         issn = {0302-9743},
   8488         doi = {10.1007/b107118},
   8489         url = {http://www.springerlink.com/content/pdn9ttp0bvk0f3e9/},
   8490         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.68.7579.pdf},
   8491         author = {Stefan Z{\"o}ls and R{\"u}diger Schollmeier and Wolfgang Kellerer and Anthony Tarlano}
   8492 }
   8493 @conference {1103797,
   8494         title = {Hydra: a platform for survivable and secure data storage systems},
   8495         booktitle = {StorageSS '05: Proceedings of the 2005 ACM workshop on Storage security and survivability},
   8496         year = {2005},
   8497         pages = {108--114},
   8498         publisher = {ACM},
   8499         organization = {ACM},
   8500         address = {New York, NY, USA},
   8501         abstract = {This paper introduces Hydra, a platform that we are developing for highly survivable and secure data storage systems that distribute information over networks and adapt timely to environment changes, enabling users to store and access critical data in a continuously available and highly trustable fashion. The Hydra platform uses MDS array codes that can be encoded and decoded efficiently for distributing and recovering user data. Novel uses of MDS array codes in Hydra are discussed, as well as Hydra's design goals, general structures and a set of basic operations on user data. We also explore Hydra's applications in survivable and secure data storage systems},
   8502         www_section = {storage},
   8503         isbn = {1-59593-233-X},
   8504         doi = {10.1145/1103780.1103797},
   8505         url = {http://portal.acm.org/citation.cfm?id=1103797$\#$},
   8506         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/w8paper13.pdf},
   8507         author = {Lihao Xu}
   8508 }
   8509 @conference {1095944,
   8510         title = {Impacts of packet scheduling and packet loss distribution on FEC Performances: observations and recommendations},
   8511         booktitle = {CoNEXT'05: Proceedings of the 2005 ACM conference on Emerging network experiment and technology},
   8512         year = {2005},
   8513         pages = {166--176},
   8514         publisher = {ACM Press},
   8515         organization = {ACM Press},
   8516         address = {New York, NY, USA},
   8517         abstract = {Forward Error Correction (FEC) is commonly used for content broadcasting. The performance of the FEC codes largely vary, depending in particular on the code used and on the object size, and these parameters have already been studied in detail by the community. However the FEC performances are also largely dependent on the packet scheduling used during transmission and on the loss pattern introduced by the channel. Little attention has been devoted to these aspects so far. Therefore the present paper analyzes their impacts on three FEC codes: LDGM Staircase and LDGM Triangle, two large block codes, and Reed-Solomon. Thanks to this analysis, we define several recommendations on how to best use these codes, depending on the test case and on the channel, which turns out to be of utmost importance},
   8518         www_section = {forward error correction, LDPC, loss pattern, multicast, packet scheduling, Reed-Solomon},
   8519         isbn = {1-59593-197-X},
   8520         doi = {10.1145/1095921.1095944},
   8521         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.63.8807},
   8522         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RR-5578.pdf},
   8523         author = {Christoph Neumann and Aur{\'e}lien Francillon and David Furodet}
   8524 }
   8525 @article {1646697,
   8526         title = {Improving delivery ratios for application layer multicast in mobile ad hoc networks},
   8527         journal = {Comput. Commun},
   8528         volume = {28},
   8529         number = {14},
   8530         year = {2005},
   8531         pages = {1669--1679},
   8532         publisher = {Butterworth-Heinemann},
   8533         address = {Newton, MA, USA},
   8534         abstract = {Delivering multicast data using application layer approaches offers different advantages, as group members communicate using so-called overlay networks. These consist of a multicast group's members connected by unicast tunnels. Since existing approaches for application layer delivery of multicast data in mobile ad hoc networks (short MANETs) only deal with routing but not with error recovery, this paper evaluates tailored mechanisms for handling packet losses and congested networks. Although illustrated at the example of a specific protocol, the mechanisms may be applied to arbitrary overlays. This paper also investigates how application layer functionality based on overlay networks can turn existing multicast routing protocols (like ODMRP, M-AODV,...) into (almost) reliable transport protocols},
   8535         www_section = {mobile Ad-hoc networks, multicast, reliability},
   8536         issn = {0140-3664},
   8537         doi = {10.1016/j.comcom.2005.02.008},
   8538         url = {http://portal.acm.org/citation.cfm?id=1646697$\#$},
   8539         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.68.5832.pdf},
   8540         author = {Baumung, Peter and Martina Zitterbart and Kendy Kutzner}
   8541 }
   8542 @conference {Jun:2005:IBI:1080192.1080199,
   8543         title = {Incentives in BitTorrent Induce Free Riding},
   8544         booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems},
   8545         series = {P2PECON '05},
   8546         year = {2005},
   8547         month = aug,
   8548         pages = {116--121},
   8549         publisher = {ACM},
   8550         organization = {ACM},
   8551         address = {Philadelphia, Pennsylvania, USA},
   8552         abstract = {We investigate the incentive mechanism of BitTorrent, which is a peer-to-peer file distribution system. As downloaders in BitTorrent are faced with the conflict between the eagerness to download and the unwillingness to upload, we relate this problem to the iterated prisoner's dilemma, which suggests guidelines to design a good incentive mechanism. Based on these guidelines, we propose a new, simple incentive mechanism. Our analysis and the experimental results using PlanetLab show that the original incentive mechanism of BitTorrent can induce free riding because it is not effective in rewarding and punishing downloaders properly. In contrast, a new mechanism proposed by us is shown to be more robust against free riders},
   8553         www_section = {BitTorrent, data dissemination, prisoner's dilemma, strategy},
   8554         isbn = {1-59593-026-4},
   8555         doi = {http://doi.acm.org/10.1145/1080192.1080199},
   8556         url = {http://doi.acm.org/10.1145/1080192.1080199},
   8557         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Incentives\%20in\%20BitTorrent\%20induce\%20free\%20riding.pdf},
   8558         author = {Jun, Seung and Ahamad, Mustaque}
   8559 }
   8560 @conference {Andrade:2005:ICB:1080192.1080198,
   8561         title = {Influences on cooperation in BitTorrent communities},
   8562         booktitle = {P2PEcon'05. Proceedings of the 2005 ACM SIGCOMM workshop on Economics of peer-to-peer systems},
   8563         series = {P2PECON '05},
   8564         year = {2005},
   8565         month = aug,
   8566         pages = {111--115},
   8567         publisher = {ACM},
   8568         organization = {ACM},
   8569         address = {Philadelphia, Pennsylvania, USA},
   8570         abstract = {We collect BitTorrent usage data across multiple file-sharing communities and analyze the factors that affect users' cooperative behavior. We find evidence that the design of the BitTorrent protocol results in increased cooperative behavior over other P2P protocols used to share similar content (e.g. Gnutella). We also investigate two additional community-specific mechanisms that foster even more cooperation},
   8571         www_section = {BitTorrent, cooperation, P2P},
   8572         isbn = {1-59593-026-4},
   8573         doi = {http://doi.acm.org/10.1145/1080192.1080198},
   8574         url = {http://doi.acm.org/10.1145/1080192.1080198},
   8575         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coopbittorrentcom_0.pdf},
   8576         author = {Nazareno Andrade and Miranda Mowbray and Lima, Aliandro and Wagner, Gustavo and Ripeanu, Matei}
   8577 }
   8578 @conference {2005_7,
   8579         title = {On the Interaction Between Overlay Routing and Underlay Routing},
   8580         booktitle = {IEEE INFOCOM '05},
   8581         year = {2005},
   8582         pages = {2543--2553},
   8583         editor = {unknown},
   8584         author = {Yong Liu and Honggang Zhang and Weibo Gong and Don Towsley}
   8585 }
   8586 @conference {cramer05isprp,
   8587         title = {ISPRP: A Message-Efficient Protocol for Initializing Structured P2P Networks},
   8588         booktitle = {Proceedings of the 24th IEEE International Performance, Computing, and Communications Conference (IPCCC)},
   8589         year = {2005},
   8590         pages = {365--370},
   8591         type = {publication},
   8592         address = {Phoenix, AZ},
   8593         abstract = {Most research activities in the field of peer-to-peer (P2P) computing are concerned with routing in virtualized overlay networks. These overlays generally assume node connectivity to be provided by an underlying network-layer routing protocol. This duplication of functionality can give rise to severe inefficiencies. In contrast, we suggest a cross-layer approach where the P2P overlay network also provides the required network-layer routing functionality by itself. Especially in sensor networks, where special attention has to be paid to the nodes' limited capabilities, this can greatly help in reducing the message overhead. In this paper, we present a key building block for such a protocol, the iterative successor pointer rewiring protocol (ISPRP), which efficiently initializes a P2P routing network among a freshly deployed set of nodes having but link-layer connectivity. ISPRP works in a fully self-organizing way and issues only a small per-node amount of messages by keeping interactions between nodes as local as possible},
   8594         www_section = {P2P},
   8595         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   8596         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05isprp.pdf},
   8597         author = {Cramer, Curt and Thomas Fuhrmann}
   8598 }
   8599 @article {1064217,
   8600         title = {On lifetime-based node failure and stochastic resilience of decentralized peer-to-peer networks},
   8601         journal = {SIGMETRICS Perform. Eval. Rev},
   8602         volume = {33},
   8603         number = {1},
   8604         year = {2005},
   8605         pages = {26--37},
   8606         publisher = {ACM},
   8607         address = {New York, NY, USA},
   8608         abstract = {To understand how high rates of churn and random departure decisions of end-users affect connectivity of P2P networks, this paper investigates resilience of random graphs to lifetime-based node failure and derives the expected delay before a user is forcefully isolated from the graph and the probability that this occurs within his/her lifetime. Our results indicate that systems with heavy-tailed lifetime distributions are more resilient than those with light-tailed (e.g., exponential) distributions and that for a given average degree, k-regular graphs exhibit the highest resilience. As a practical illustration of our results, each user in a system with n = 100 billion peers, 30-minute average lifetime, and 1-minute node-replacement delay can stay connected to the graph with probability 1-1/n using only 9 neighbors. This is in contrast to 37 neighbors required under previous modeling efforts. We finish the paper by showing that many P2P networks are almost surely (i.e., with probability 1-o(1)) connected if they have no isolated nodes and derive a simple model for the probability that a P2P system partitions under churn},
   8609         www_section = {P2P, pareto, stochastic lifetime resilience},
   8610         issn = {0163-5999},
   8611         doi = {10.1145/1071690.1064217},
   8612         url = {http://portal.acm.org/citation.cfm?id=1071690.1064217$\#$},
   8613         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.5920.pdf},
   8614         author = {Leonard, Derek and Rai, Vivek and Loguinov, Dmitri}
   8615 }
   8616 @conference {esorics05-Klonowski,
   8617         title = {Local View Attack on Anonymous Communication},
   8618         booktitle = {Proceedings of ESORICS 2005},
   8619         year = {2005},
   8620         month = {September},
   8621         publisher = {Springer Berlin / Heidelberg},
   8622         organization = {Springer Berlin / Heidelberg},
   8623         abstract = {We consider anonymous communication protocols based on onions: each message is sent in an encrypted form through a path chosen at random by its sender, and the message is re-coded by each server on the path. Recently, it has been shown that if the anonymous paths are long enough, then the protocols provide provable security for some adversary models. However, it was assumed that all users choose intermediate servers uniformly at random from the same set of servers.
   8624 We show that if a single user chooses only from a constrained subset of possible intermediate servers, anonymity level may dramatically decrease. A thumb rule is that if Alice is aware of much less than 50\% of possible intermediate servers, then the anonymity set for her message becomes surprisingly small with high probability. Moreover, for each location in the anonymity set an adversary may compute probability that it gets a message of Alice. Since there are big differences in these probabilities, in most cases the true destination of the message from Alice is in a small group of locations with the highest probabilities.
   8625 Our results contradict some beliefs that the protocols mentioned guarantee anonymity provided that the set of possible intermediate servers for each user is large},
   8626         www_section = {anonymity measurement, onion routing},
   8627         isbn = {978-3-540-28963-0},
   8628         doi = {10.1007/11555827},
   8629         url = {http://www.springerlink.com/content/ewblt5k80xrgqe4j/},
   8630         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/esorics05-Klonowski.pdf},
   8631         author = {Marcin Gogolewski and Marek Klonowski and Miroslaw Kutylowski}
   8632 }
   8633 @article {1042380,
   8634         title = {Location Awareness in Unstructured Peer-to-Peer Systems},
   8635         journal = {IEEE Trans. Parallel Distrib. Syst},
   8636         volume = {16},
   8637         number = {2},
   8638         year = {2005},
   8639         pages = {163--174},
   8640         publisher = {IEEE Press},
   8641         address = {Piscataway, NJ, USA},
   8642         abstract = {Peer-to-Peer (P2P) computing has emerged as a popular model aiming at further utilizing Internet information and resources. However, the mechanism of peers randomly choosing logical neighbors without any knowledge about underlying physical topology can cause a serious topology mismatch between the P2P overlay network and the physical underlying network. The topology mismatch problem brings great stress in the Internet infrastructure. It greatly limits the performance gain from various search or routing techniques. Meanwhile, due to the inefficient overlay topology, the flooding-based search mechanisms cause a large volume of unnecessary traffic. Aiming at alleviating the mismatching problem and reducing the unnecessary traffic, we propose a location-aware topology matching (LTM) technique. LTM builds an efficient overlay by disconnecting slow connections and choosing physically closer nodes as logical neighbors while still retaining the search scope and reducing response time for queries. LTM is scalable and completely distributed in the sense that it does not require any global knowledge of the whole overlay network. The effectiveness of LTM is demonstrated through simulation studies},
   8643         www_section = {flooding attacks, location-aware topology, P2P, search efficiency, topology matching},
   8644         issn = {1045-9219},
   8645         doi = {10.1109/TPDS.2005.21},
   8646         url = {http://portal.acm.org/citation.cfm?id=1042380$\#$},
   8647         author = {Yunhao Liu and Xiao, Li and Liu, Xiaomei and Ni, Lionel M. and Zhang, Xiaodong}
   8648 }
   8649 @conference {torta05,
   8650         title = {Low-Cost Traffic Analysis of Tor},
   8651         booktitle = {Proceedings of the 2005 IEEE Symposium on Security and Privacy},
   8652         year = {2005},
   8653         month = {May},
   8654         publisher = {IEEE CS},
   8655         organization = {IEEE CS},
   8656         abstract = {Tor is the second generation Onion Router, supporting the anonymous transport of TCP streams over the Internet. Its low latency makes it very suitable for common tasks, such as web browsing, but insecure against traffic-analysis attacks by a global passive adversary. We present new traffic-analysis techniques that allow adversaries with only a partial view of the network to infer which nodes are being used to relay the anonymous streams and therefore greatly reduce the anonymity provided by Tor. Furthermore, we show that otherwise unrelated streams can be linked back to the same initiator. Our attack is feasible for the adversary anticipated by the Tor designers. Our theoretical attacks are backed up by experiments performed on the deployed, albeit experimental, Tor network. Our techniques should also be applicable to any low latency anonymous network. These attacks highlight the relationship between the field of traffic-analysis and more traditional computer security issues, such as covert channel analysis. Our research also highlights that the inability to directly observe network links does not prevent an attacker from performing traffic-analysis: the adversary can use the anonymising network as an oracle to infer the traffic load on remote nodes in order to perform traffic-analysis},
   8657         www_section = {anonymity, onion routing, traffic analysis},
   8658         isbn = {0-7695-2339-0},
   8659         url = {http://portal.acm.org/citation.cfm?id=1059390},
   8660         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/torta05.pdf},
   8661         author = {Steven J. Murdoch and George Danezis}
   8662 }
   8663 @conference {Fiat05makingchord,
   8664         title = {Making chord robust to byzantine attacks},
   8665         booktitle = {Proceedings of the European Symposium on Algorithms (ESA)},
   8666         year = {2005},
   8667         pages = {803--814},
   8668         publisher = {Springer},
   8669         organization = {Springer},
   8670         abstract = {Chord is a distributed hash table (DHT) that requires only O(log n) links per node and performs searches with latency and message cost O(log n), where n is the number of peers in the network. Chord assumes all nodes behave according to protocol. We give a variant of Chord which is robust with high probability for any time period during which: 1) there are always at least z total peers in the network for some integer z; 2) there are never more than (1/4 - {\epsilon})z Byzantine peers in the network for a fixed {\epsilon} > 0; and 3) the number of peer insertion and deletion events is no more than zk for some tunable parameter k. We assume there is an adversary controlling the Byzantine peers and that the IP-addresses of all the Byzantine peers and the locations where they join the network are carefully selected by this adversary. Our notion of robustness is rather strong in that we not only guarantee that searches can be performed but also that we can enforce any set of {\textquotedblleft}proper behavior{\textquotedblright} such as contributing new material, etc. In comparison to Chord, the resources required by this new variant are only a polylogarithmic factor greater in communication, messaging, and linking costs},
   8671         www_section = {Chord, distributed hash table, robustness},
   8672         doi = {10.1007/11561071},
   8673         url = {http://www.springerlink.com/content/422llxn7khwej72n/},
   8674         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swarm.pdf},
   8675         author = {Amos Fiat and Jared Saia and Maxwell Young}
   8676 }
   8677 @conference { wang:market-driven,
   8678         title = {Market-driven bandwidth allocation in selfish overlay networks},
   8679         booktitle = {INFOCOM'05. Proceedings of the 24th IEEE International Conference on Computer Communications},
   8680         year = {2005},
   8681         month = mar,
   8682         pages = {2578--2589},
   8683         publisher = {IEEE Computer Society},
   8684         organization = {IEEE Computer Society},
   8685         address = {Miami, FL, USA},
   8686         abstract = {Selfish overlay networks consist of autonomous nodes that develop their own strategies by optimizing towards their local objectives and self-interests, rather than following prescribed protocols. It is thus important to regulate the behavior of selfish nodes, so that system-wide properties are optimized. In this paper, we investigate the problem of bandwidth allocation in overlay networks, and propose to use a market-driven approach to regulate the behavior of selfish nodes that either provide or consume services. In such markets, consumers of services select the best service providers, taking into account both the performance and the price of the service. On the other hand, service providers are encouraged to strategically decide their respective prices in a pricing game, in order to maximize their economic revenues and minimize losses in the long run. In order to overcome the limitations of previous models towards similar objectives, we design a decentralized algorithm that uses reinforcement learning to help selfish nodes to incrementally adapt to the local market, and to make optimized strategic decisions based on past experiences. We have simulated our proposed algorithm in randomly generated overlay networks, and have shown that the behavior of selfish nodes converges to their optimal strategies, and resource allocations in the entire overlay are near-optimal, and efficiently adapts to the dynamics of overlay networks},
   8687         www_section = {bandwidth allocation, economics, market-driven, prescribed protocol, selfish overlay network},
   8688         isbn = {0-7803-8968-9 },
   8689         doi = {http://dx.doi.org/10.1109/INFCOM.2005.1498542},
   8690         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Market-driven\%20bandwidth\%20allocation.pdf},
   8691         author = {Weihong Wang and Baochun Li}
   8692 }
   8693 @conference {Guo:2005:MAM:1251086.1251090,
   8694         title = {Measurements, analysis, and modeling of BitTorrent-like systems},
   8695         booktitle = {IMC'05. Proceedings of the 5th ACM SIGCOMM Conference on Internet Measurement},
   8696         series = {IMC '05},
   8697         year = {2005},
   8698         month = oct,
   8699         pages = {4--4},
   8700         publisher = {USENIX Association},
   8701         organization = {USENIX Association},
   8702         address = {Berkeley, CA, USA},
   8703         abstract = {Existing studies on BitTorrent systems are single-torrent based, while more than 85\% of all peers participate in multiple torrents according to our trace analysis. In addition, these studies are not sufficiently insightful and accurate even for single-torrent models, due to some unrealistic assumptions. Our analysis of representative BitTorrent traffic provides several new findings regarding the limitations of BitTorrent systems: (1) Due to the exponentially decreasing peer arrival rate in reality, service availability in such systems becomes poor quickly, after which it is difficult for the file to be located and downloaded. (2) Client performance in the BitTorrent-like systems is unstable, and fluctuates widely with the peer population. (3) Existing systems could provide unfair services to peers, where peers with high downloading speed tend to download more and upload less. In this paper, we study these limitations on torrent evolution in realistic environments. Motivated by the analysis and modeling results, we further build a graph based multi-torrent model to study inter-torrent collaboration. Our model quantitatively provides strong motivation for inter-torrent collaboration instead of directly stimulating seeds to stay longer. We also discuss a system design to show the feasibility of multi-torrent collaboration},
   8704         www_section = {bittorrent system, intertorrent collaboration, multi-torrent collaboration, multiple torrents},
   8705         url = {http://www.usenix.org/events/imc05/tech/full_papers/guo/guo_html/},
   8706         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2705\%20-\%20Measurement\%2C\%20analysis\%20and\%20modeling\%20of\%20BitTorrent-like\%20systems.pdf},
   8707         author = {Guo, Lei and Chen, Songqing and Xiao, Zhen and Tan, Enhua and Ding, Xiaoning and Zhang, Xiaodong}
   8708 }
   8709 @conference {kutzner05overnet,
   8710         title = {Measuring Large Overlay Networks--The Overnet Example},
   8711         booktitle = {Konferenzband der 14. Fachtagung Kommunikation in Verteilten Systemen (KiVS 2005)},
   8712         year = {2005},
   8713         type = {publication},
   8714         address = {Kaiserslautern, Germany},
   8715         abstract = {Peer-to-peer overlay networks have grown significantly in size and sophistication over the last years. Meanwhile, distributed hash tables (DHT) provide efficient means to create global scale overlay networks on top of which various applications can be built. Although filesharing still is the most prominent example, other applications are well conceivable. In order to rationally design such applications, it is important to know (and understand) the properties of the overlay networks as seen from the respective application.
   8716 This paper reports the results from a two week measurement of the entire Overnet network, the currently most widely deployed DHT-based overlay. We describe both, the design choices that made that measurement feasible and the results from the measurement itself. Besides the basic determination of network size, node availability and node distribution, we found unexpected results for the overlay latency distribution},
   8717         www_section = {distributed hash table, overlay networks, P2P},
   8718         isbn = {978-3-540-24473-8},
   8719         doi = {10.1007/b138861},
   8720         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   8721         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05overnet.pdf},
   8722         author = {Kendy Kutzner and Thomas Fuhrmann}
   8723 }
   8724 @conference {pet05-serjantov,
   8725         title = {Message Splitting Against the Partial Adversary},
   8726         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
   8727         year = {2005},
   8728         month = {May},
   8729         pages = {26--39},
   8730         publisher = {Springer Berlin / Heidelberg},
   8731         organization = {Springer Berlin / Heidelberg},
   8732         abstract = {We review threat models used in the evaluation of anonymity systems' vulnerability to traffic analysis. We then suggest that, under the partial adversary model, if multiple packets have to be sent through these systems, more anonymity can be achieved if senders route the packets via different paths. This is in contrast to the normal technique of using the same path for them all. We comment on the implications of this for message-based and connection-based anonymity systems. We then proceed to examine the only remaining traffic analysis attack -- one which considers the entire system as a black box. We show that it is more difficult to execute than the literature suggests, and attempt to empirically estimate the parameters of the Mixmaster and the Mixminion systems needed in order to successfully execute the attack},
   8733         www_section = {anonymity, traffic analysis},
   8734         isbn = {978-3-540-34745-3},
   8735         doi = {10.1007/11767831},
   8736         url = {http://www.springerlink.com/content/375x2pv385388h86/},
   8737         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-serjantov.pdf},
   8738         author = {Andrei Serjantov and Steven J. Murdoch}
   8739 }
   8740 @conference {pet05-camenisch,
   8741         title = {Mix-network with Stronger Security},
   8742         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
   8743         year = {2005},
   8744         month = {May},
   8745         pages = {128--147},
   8746         publisher = {Springer Berlin / Heidelberg},
   8747         organization = {Springer Berlin / Heidelberg},
   8748         abstract = {We consider a mix-network as a cryptographic primitive that provides anonymity. A mix-network takes as input a number of ciphertexts and outputs a random shuffle of the corresponding plaintexts. Common applications of mix-nets are electronic voting and anonymous network traffic. In this paper, we present a novel construction of a mix-network, which is based on shuffling ElGamal encryptions. Our scheme is the first mix-net to meet the strongest security requirements: it is robust and secure against chosen ciphertext attacks as well as against active attacks in the Universally Composable model. Our construction allows one to securely execute several mix-net instances concurrently, as well as to run multiple mix-sessions without changing a set of keys. Nevertheless, the scheme is efficient: it requires a linear work (in the number of input messages) per mix-server},
   8749         www_section = {anonymity, electronic voting},
   8750         isbn = {978-3-540-34745-3},
   8751         doi = {10.1007/11767831},
   8752         url = {http://www.springerlink.com/content/v32m5122127m78v0/},
   8753         author = {Jan Camenisch and Anton Mityagin}
   8754 }
   8755 @conference { gkantsidis:network,
   8756         title = {Network coding for large scale content distribution},
   8757         booktitle = {INFOCOM'05. Proceedings of the 24th IEEE International Conference on Computer Communications},
   8758         year = {2005},
   8759         month = mar,
   8760         pages = {2235--2245},
   8761         publisher = {IEEE Computer Society},
   8762         organization = {IEEE Computer Society},
   8763         address = {Miami, FL, USA},
   8764         abstract = {We propose a new scheme for content distribution of large files that is based on network coding. With network coding, each node of the distribution network is able to generate and transmit encoded blocks of information. The randomization introduced by the coding process eases the scheduling of block propagation, and, thus, makes the distribution more efficient. This is particularly important in large unstructured overlay networks, where the nodes need to make block forwarding decisions based on local information only. We compare network coding to other schemes that transmit unencoded information (i.e. blocks of the original file) and, also, to schemes in which only the source is allowed to generate and transmit encoded packets. We study the performance of network coding in heterogeneous networks with dynamic node arrival and departure patterns, clustered topologies, and when incentive mechanisms to discourage free-riding are in place. We demonstrate through simulations of scenarios of practical interest that the expected file download time improves by more than 20-30\% with network coding compared to coding at the server only and, by more than 2-3 times compared to sending unencoded information. Moreover, we show that network coding improves the robustness of the system and is able to smoothly handle extreme situations where the server and nodes leave the system},
   8765         www_section = {large scale content distribution, network coding},
   8766         isbn = {0-7803-8968-9 },
   8767         doi = {http://dx.doi.org/10.1109/INFCOM.2005.1498511},
   8768         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Network\%20coding\%20for\%20large\%20scale\%20content\%20distribution.pdf},
   8769         author = {Christos Gkantsidis and Pablo Rodriguez}
   8770 }
   8771 @conference {Sanghavi:2005:NMF:1080192.1080200,
   8772         title = {A new mechanism for the free-rider problem},
   8773         booktitle = {P2PEcon'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems},
   8774         series = {P2PECON '05},
   8775         year = {2005},
   8776         month = aug,
   8777         pages = {122--127},
   8778         publisher = {ACM},
   8779         organization = {ACM},
   8780         address = {Philadelphia, Pennsylvania, USA},
   8781         abstract = {The free-rider problem arises in the provisioning of public resources, when users of the resource have to contribute towards the cost of production. Selfish users may have a tendency to misrepresent preferences -- so as to minimize individual contributions -- leading to inefficient levels of production of the resource. Groves and Loeb formulated a classic model capturing this problem, and proposed (what later came to be known as) the VCG mechanism as a solution. However, in the presence of heterogeneous users and communication constraints, or in decentralized settings, implementing this mechanism places an unrealistic communication burden. In this paper we propose a class of alternative mechanisms for the same problem as considered by Groves and Loeb, but with the added constraint of severely limited communication between users and the provisioning authority. When these mechanisms are used, efficient production is ensured as a Nash equilibrium outcome, for a broad class of users. Furthermore, a natural bid update strategy is shown to globally converge to efficient Nash equilibria. An extension to multiple public goods with inter-related valuations is also presented},
   8782         www_section = {free-rider, problem},
   8783         isbn = {1-59593-026-4},
   8784         doi = {http://doi.acm.org/10.1145/1080192.1080200},
   8785         url = {http://doi.acm.org/10.1145/1080192.1080200},
   8786         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PEcon\%2705\%20-\%20A\%20new\%20mechanism\%20for\%20the\%20free-rider\%20problem.pdf},
   8787         author = {Sanghavi, Sujay and Hajek, Bruce}
   8788 }
   8789 @conference {1251532,
   8790         title = {Non-transitive connectivity and DHTs},
   8791         booktitle = {WORLDS'05: Proceedings of the 2nd conference on Real, Large Distributed Systems},
   8792         year = {2005},
   8793         pages = {55--60},
   8794         publisher = {USENIX Association},
   8795         organization = {USENIX Association},
   8796         address = {Berkeley, CA, USA},
   8797         abstract = {The most basic functionality of a distributed hash table, or DHT, is to partition a key space across the set of nodes in a distributed system such that all nodes agree on the partitioning. For example, the Chord DHT assigns each node},
   8798         www_section = {Chord, distributed hash table},
   8799         url = {http://portal.acm.org/citation.cfm?id=1251532$\#$},
   8800         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ntr-worlds05.pdf},
   8801         author = {Michael J. Freedman and Lakshminarayanan, Karthik and Rhea, Sean C. and Ion Stoica}
   8802 }
   8803 @booklet {cryptoeprint:2005:394,
   8804         title = {Obfuscated Ciphertext Mixing},
   8805         year = {2005},
   8806         month = {November},
   8807         abstract = {Mixnets are a type of anonymous channel composed of a handful of trustees that, each in turn, shuffle and rerandomize a batch of ciphertexts. For applications that require verifiability, each trustee provides a proof of correct mixing. Though mixnets have recently been made quite efficient, they still require secret computation and proof generation after the mixing process.
   8808 
   8809 We introduce and implement Obfuscated Ciphertext Mixing, the obfuscation of a mixnet program. Using this technique, all proofs can be performed before the mixing process, even before the inputs are available. In addition, the mixing program does not need to be secret: anyone can publicly compute the shuffle (though not the decryption). We frame this functionality in the strongest obfuscation setting proposed by Barak et al. [4], tweaked for the public-key setting. For applications where the secrecy of the shuffle permutation is particularly important (e.g. voting), we also consider the Distributed Obfuscation of a Mixer, where multiple trustees cooperate to generate an obfuscated mixer program such that no single trustee knows the composed shuffle permutation},
   8810         www_section = {obfuscated ciphertext mixing},
   8811         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.6592},
   8812         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cryptoeprint-2005-394.pdf},
   8813         author = {Ben Adida and Douglas Wikstr{\"o}m}
   8814 }
   8815 @conference {Garcia05off-linekarma:,
   8816         title = {Off-line Karma: A Decentralized Currency for Peer-to-peer and Grid Applications},
   8817         booktitle = {ACNS'05. 3rd Applied Cryptography and Network Security Conference},
   8818         series = {Lecture Notes in Computer Science},
   8819         volume = {3531},
   8820         year = {2005},
   8821         month = jun,
   8822         pages = {364--377},
   8823         publisher = {Springer},
   8824         organization = {Springer},
   8825         address = {New York, NY, USA},
   8826         abstract = {Peer-to-peer (P2P) and grid systems allow their users to exchange information and share resources, with little centralised or hierarchical control, instead relying on the fairness of the users to make roughly as much resources available as they use. To enforce this balance, some kind of currency or barter (called karma) is needed that must be exchanged for resources thus limiting abuse. We present a completely decentralised, off-line karma implementation for P2P and grid systems, that detects double-spending and other types of fraud under varying adversarial scenarios. The system is based on tracing the spending pattern of coins, and distributing the normally central role of a bank over a predetermined, but random, selection of nodes. The system is designed to allow nodes to join and leave the system at arbitrary times},
   8827         www_section = {decentralized, free-riding, GRID, micropayments, peer-to-peer networking, security},
   8828         doi = {10.1007/11496137_25},
   8829         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACNS\%2705\%20-\%20Garcia\%20\%26\%20Hoepman\%20-\%20Off-line\%20Karma.pdf},
   8830         author = {Flavio D. Garcia and Jaap-Henk Hoepman}
   8831 }
   8832 @conference {2005_8,
   8833         title = {OpenDHT: a public DHT service and its uses},
   8834         booktitle = {Proceedings of the 2005 conference on Applications, technologies, architectures, and protocols for computer communications},
   8835         series = {SIGCOMM '05},
   8836         year = {2005},
   8837         pages = {73--84},
   8838         publisher = {ACM},
   8839         organization = {ACM},
   8840         address = {New York, NY, USA},
   8841         www_section = {distributed hash table, openDHT, peer-to-peer, resource allocation},
   8842         isbn = {1-59593-009-4},
   8843         doi = {10.1145/1080091.1080102},
   8844         author = {Sean Rhea and Brighten Godfrey and Brad Karp and John Kubiatowicz and Sylvia Ratnasamy and Scott Shenker and Ion Stoica and Harlan Yu},
   8845         url = {http://doi.acm.org/10.1145/1080091.1080102},
   8846         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/openDHT.pdf}
   8847 }
   8848 @article {Feldman:2005:OFB:1120717.1120723,
   8849         title = {Overcoming free-riding behavior in peer-to-peer systems},
   8850         journal = {ACM SIGecom Exchanges},
   8851         volume = {5},
   8852         year = {2005},
   8853         month = jul,
   8854         pages = {41--50},
   8855         publisher = {ACM},
   8856         address = {New York, NY, USA},
   8857         abstract = {While the fundamental premise of peer-to-peer (P2P) systems is that of voluntary resource sharing among individual peers, there is an inherent tension between individual rationality and collective welfare that threatens the viability of these systems. This paper surveys recent research at the intersection of economics and computer science that targets the design of distributed systems consisting of rational participants with diverse and selfish interests. In particular, we discuss major findings and open questions related to free-riding in P2P systems: factors affecting the degree of free-riding, incentive mechanisms to encourage user cooperation, and challenges in the design of incentive mechanisms for P2P systems},
   8858         www_section = {algorithms, cooperation, design, economics, game-theory, hidden-action, hidden-information, incentives, peer-to-peer networking, performance, reliability},
   8859         issn = {1551-9031},
   8860         doi = {http://doi.acm.org/10.1145/1120717.1120723},
   8861         url = {http://doi.acm.org/10.1145/1120717.1120723},
   8862         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGecom\%20Exch.\%20\%285\%29\%20-\%20Overcoming\%20free-riding\%20behavior.pdf},
   8863         author = {Michal Feldman and John Chuang}
   8864 }
   8865 @article {2005_9,
   8866         title = {P2P Contracts: a Framework for Resource and Service Exchange},
   8867         journal = {FGCS. Future Generation Computer Systems},
   8868         volume = {21},
   8869         year = {2005},
   8870         month = mar,
   8871         pages = {333--347},
   8872         abstract = {A crucial aspect of Peer-to-Peer (P2P) systems is that of providing incentives for users to contribute their resources to the system. Without such incentives, empirical data show that a majority of the participants act as free riders. As a result, a substantial amount of resource goes untapped, and, frequently, P2P systems devolve into client-server systems with attendant issues of performance under high load. We propose to address the free rider problem by introducing the notion of a P2P contract. In it, peers are made aware of the benefits they receive from the system as a function of their contributions. In this paper, we first describe a utility-based framework to determine the components of the contract and formulate the associated resource allocation problem. We consider the resource allocation problem for a flash crowd scenario and show how the contract mechanism implemented using a centralized server can be used to quickly create pseudoservers that can serve out the requests. We then study a decentralized implementation of the P2P contract scheme in which each node implements the contract based on local demand. We show that in such a system, other than contributing storage and bandwidth to serve out requests, it is also important that peer nodes function as application-level routers to connect pools of available pseudoservers. We study the performance of the distributed implementation with respect to the various parameters including the terms of the contract and the triggers to create pseudoservers and routers},
   8873         www_section = {contracts, framework, P2P, peer-to-peer networking, resource exchange, service exchange},
   8874         issn = {0167-739X},
   8875         doi = {10.1016/j.future.2004.04.013 },
   8876         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FGCS\%20-\%20P2P\%20Contracts\%3A\%20a\%20Framework\%20for\%20Resource\%20and\%20Service\%20Exchange.pdf},
   8877         author = {Dipak Ghosal and Benjamin K. Poon and Keith Kong}
   8878 }
   8879 @conference { busca:pastis:,
   8880         title = {Pastis: A Highly-Scalable Multi-user Peer-to-Peer File System},
   8881         booktitle = {Euro-Par'05 Parallel Processing},
   8882         year = {2005},
   8883         month = sep,
   8884         pages = {1173--1182},
   8885         publisher = {Springer-Verlag},
   8886         organization = {Springer-Verlag},
   8887         address = {Lisboa, Portugal},
   8888         abstract = {We introduce Pastis, a completely decentralized multi-user read-write peer-to-peer file system. In Pastis every file is described by a modifiable inode-like structure which contains the addresses of the immutable blocks in which the file contents are stored. All data are stored using the Past distributed hash table (DHT), which we have modified in order to reduce the number of network messages it generates, thus optimizing replica retrieval.
   8889 Pastis' design is simple compared to other existing systems, as it does not require complex algorithms like Byzantine-fault tolerant (BFT) replication or a central administrative authority. It is also highly scalable in terms of the number of network nodes and users sharing a given file or portion of the file system. Furthermore, Pastis takes advantage of the fault tolerance and good locality properties of its underlying storage layer, the Past DHT.
   8890 We have developed a prototype based on the FreePastry open-source implementation of the Past DHT. We have used this prototype to evaluate several characteristics of our file system design. Supporting the close-to-open consistency model, plus a variant of the read-your-writes model, our prototype shows that Pastis is between 1.4 and 1.8 times slower than NFS. In comparison, Ivy and Oceanstore are between two and three times slower than NFS},
   8891         www_section = {distributed hash table, multi-user, Pastis, peer-to-peer file system, read-write},
   8892         doi = {10.1007/11549468_128},
   8893         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Euro-Par\%2705\%20-\%20Pastis.pdf},
   8894         author = {Jean-Michel Busca and Fabio Picconi and Pierre Sens}
   8895 }
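%
% The Pastis entry above describes each file as a mutable, inode-like record that
% points to immutable, content-addressed blocks stored in the Past DHT. The Python
% fragment below is only a minimal sketch of that data layout; the dict stands in
% for the DHT and all names (put_immutable, write_file, ...) are illustrative, not
% Pastis' actual API.
%
% import hashlib
%
% dht = {}  # toy stand-in for the Past DHT: key -> value
%
% def put_immutable(data: bytes) -> str:
%     """Store a content block under the hash of its contents (immutable, self-certifying)."""
%     key = hashlib.sha1(data).hexdigest()
%     dht[key] = data
%     return key
%
% def write_file(path: str, content: bytes, block_size: int = 8) -> None:
%     """Split the file into immutable blocks and record their keys in a mutable 'inode'."""
%     blocks = [content[i:i + block_size] for i in range(0, len(content), block_size)]
%     inode = {"path": path, "size": len(content),
%              "blocks": [put_immutable(b) for b in blocks]}
%     dht["inode:" + path] = inode  # the modifiable part, updated on every write
%
% def read_file(path: str) -> bytes:
%     inode = dht["inode:" + path]
%     return b"".join(dht[k] for k in inode["blocks"])
%
% write_file("/doc.txt", b"hello pastis world")
% assert read_file("/doc.txt") == b"hello pastis world"
%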
   8896 @conference {Massachusetts05peer-to-peercommunication,
   8897         title = {Peer-to-Peer Communication Across Network Address Translators},
   8898         booktitle = {ATEC05. Proceedings of the USENIX Annual Technical Conference},
   8899         year = {2005},
   8900         month = apr,
   8901         pages = {179--192},
   8902         publisher = {USENIX Association},
   8903         organization = {USENIX Association},
   8904         address = {Anaheim, CA},
   8905         abstract = {Network Address Translation (NAT) causes well-known difficulties for peer-to-peer (P2P) communication, since the peers involved may not be reachable at any globally valid IP address. Several NAT traversal techniques are known, but their documentation is slim, and data about their robustness or relative merits is slimmer. This paper documents and analyzes one of the simplest but most robust and practical NAT traversal techniques, commonly known as hole punching. Hole punching is moderately well-understood for UDP communication, but we show how it can be reliably used to set up peer-to-peer TCP streams as well. After gathering data on the reliability of this technique on a wide variety of deployed NATs, we find that about 82\% of the NATs tested support hole punching for UDP, and about 64\% support hole punching for TCP streams. As NAT vendors become increasingly conscious of the needs of important P2P applications such as Voice over IP and online gaming protocols, support for hole punching is likely to increase in the future},
   8906         www_section = {communication network, ip address, NAT, nat traversal techniques, network address translation, P2P, peer-to-peer networking},
   8907         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.59.6799\&rep=rep1\&type=pdf},
   8908         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.59.6799.pdf},
   8909         author = {Pyda Srisuresh and Bryan Ford and Dan Kegel}
   8910 }
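%
% The hole-punching entry above is easiest to see in code. The sketch below shows
% the UDP client side only, assuming both peers have already learned each other's
% public endpoint through a rendezvous server (not shown); the address and port
% are placeholders, not real endpoints.
%
% import socket, time
%
% PEER = ("203.0.113.7", 40000)   # placeholder public endpoint of the other peer
% LOCAL_PORT = 40000
%
% sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
% sock.bind(("", LOCAL_PORT))
% sock.settimeout(1.0)
%
% # Both peers send to each other's public endpoint at roughly the same time.
% # The first outbound datagram opens ("punches") a mapping in the local NAT, so
% # the peer's subsequent packets look like replies and are let through.
% for _ in range(10):
%     sock.sendto(b"punch", PEER)
%     try:
%         data, addr = sock.recvfrom(1024)
%         print("hole punched, received", data, "from", addr)
%         break
%     except socket.timeout:
%         time.sleep(0.5)
%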
   8911 @booklet {Fuhrmann_aplatform,
   8912         title = {A platform for lab exercises in sensor networks},
   8913         year = {2005},
   8914         abstract = {Programming of and experiences with sensor network nodes are about to enter the curricula of technical universities. Often however, practical obstacles complicate the implementation of a didactic concept. In this paper we present our approach that uses a Java virtual machine to decouple experiments with algorithm and protocol concepts from the odds of embedded system programming. This concept enables students to load Java classes via an SD-card into a sensor node. An LC display provides detailed information if the program aborts due to bugs},
   8915         www_section = {sensor networks},
   8916         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.72.8036},
   8917         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.8036.pdf},
   8918         author = {Thomas Fuhrmann and Till Harbaum}
   8919 }
   8920 @conference {Ali:2005:PTA:1082473.1082631,
   8921         title = {Preprocessing techniques for accelerating the DCOP algorithm ADOPT},
   8922         booktitle = {AAMAS'05--Proceedings of the fourth international joint conference on Autonomous agents and multiagent systems},
   8923         series = {AAMAS '05},
   8924         year = {2005},
   8925         month = jul,
   8926         pages = {1041--1048},
   8927         publisher = {ACM},
   8928         organization = {ACM},
   8929         address = {Utrecht, Netherlands},
   8930         abstract = {Methods for solving Distributed Constraint Optimization Problems (DCOP) have emerged as key techniques for distributed reasoning. Yet, their application faces significant hurdles in many multiagent domains due to their inefficiency. Preprocessing techniques have successfully been used to speed up algorithms for centralized constraint satisfaction problems. This paper introduces a framework of different preprocessing techniques that are based on dynamic programming and speed up ADOPT, an asynchronous complete and optimal DCOP algorithm. We investigate when preprocessing is useful and which factors influence the resulting speedups in two DCOP domains, namely graph coloring and distributed sensor networks. Our experimental results demonstrate that our preprocessing techniques are fast and can speed up ADOPT by an order of magnitude},
   8931         www_section = {ADOPT algorithm, DCOP, distributed constraint optimization},
   8932         isbn = {1-59593-093-0},
   8933         doi = {10.1145/1082473.1082631},
   8934         url = {http://doi.acm.org/10.1145/1082473.1082631},
   8935         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS\%2705\%20-\%20Accelerating\%20the\%20DCOP\%20algorithm\%20ADOPT.pdf},
   8936         author = {Ali, Syed and Koenig, Sven and Tambe, Milind}
   8937 }
   8938 @article {2005_10,
   8939         title = {Privacy Practices of Internet Users: Self-reports Versus Observed Behavior},
   8940         journal = {Int. J. Hum.-Comput. Stud},
   8941         volume = {63},
   8942         year = {2005},
   8943         pages = {203--227},
   8944         abstract = {Several recent surveys conclude that people are concerned about privacy and consider it to be an important factor in their online decision making. This paper reports on a study in which (1) user concerns were analysed more deeply and (2) what users said was contrasted with what they did in an experimental e-commerce scenario. Eleven independent variables were shown to affect the online behavior of at least some groups of users. Most significant were trust marks present on web pages and the existence of a privacy policy, though users seldom consulted the policy when one existed. We also find that many users have inaccurate perceptions of their own knowledge about privacy technology and vulnerabilities, and that important user groups, like those similar to the Westin "privacy fundamentalists", do not appear to form a cohesive group for privacy-related decision making. In this study we adopt an experimental economic research paradigm, a method for examining user behavior which challenges the current emphasis on survey data. We discuss these issues and the implications of our results on user interpretation of trust marks and interaction design. Although broad policy implications are beyond the scope of this paper, we conclude by questioning the application of the ethical/legal doctrine of informed consent to online transactions in the light of the evidence that users frequently do not consult privacy policies},
   8945         www_section = {decision-making, design, e-commerce, economic models, policy, privacy, survey},
   8946         issn = {1071-5819},
   8947         doi = {10.1016/j.ijhcs.2005.04.019},
   8948         url = {http://dx.doi.org/10.1016/j.ijhcs.2005.04.019},
   8949         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPractices2005Jensen.pdf},
   8950         author = {Jensen, Carlos and Potts, Colin and Jensen, Christian}
   8951 }
   8952 @conference {pet05-bissias,
   8953         title = {Privacy Vulnerabilities in Encrypted HTTP Streams},
   8954         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
   8955         year = {2005},
   8956         month = {May},
   8957         pages = {1--11},
   8958         publisher = {Springer Berlin / Heidelberg},
   8959         organization = {Springer Berlin / Heidelberg},
   8960         abstract = {Encrypting traffic does not prevent an attacker from performing some types of traffic analysis. We present a straightforward traffic analysis attack against encrypted HTTP streams that is surprisingly effective in identifying the source of the traffic. An attacker starts by creating a profile of the statistical characteristics of web requests from interesting sites, including distributions of packet sizes and inter-arrival times. Later, candidate encrypted streams are compared against these profiles. In our evaluations using real traffic, we find that many web sites are subject to this attack. With a training period of 24 hours and a 1 hour delay afterwards, the attack achieves only 23\% accuracy. However, an attacker can easily pre-determine which of the trained sites are easily identifiable. Accordingly, against 25 such sites, the attack achieves 40\% accuracy; with three guesses, the attack achieves 100\% accuracy for our data. Longer delays after training decrease accuracy, but not substantially. We also propose some countermeasures and improvements to our current method. Previous work analyzed SSL traffic to a proxy, taking advantage of a known flaw in SSL that reveals the length of each web object. In contrast, we exploit the statistical characteristics of web streams that are encrypted as a single flow, which is the case with WEP/WPA, IPsec, and SSH tunnels},
   8961         www_section = {privacy, traffic analysis},
   8962         isbn = {978-3-540-34745-3},
   8963         doi = {10.1007/11767831},
   8964         url = {http://www.springerlink.com/content/1062w684754754h4/},
   8965         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-bissias.pdf},
   8966         author = {George Dean Bissias and Marc Liberatore and Brian Neil Levine}
   8967 }
   8968 @book {2005_11,
   8969         title = {Privacy-Preserving Set Operations},
   8970         booktitle = {Advances in Cryptology -- CRYPTO 2005},
   8971         series = {Lecture Notes in Computer Science},
   8972         volume = {3621},
   8973         year = {2005},
   8974         pages = {241--257},
   8975         publisher = {Springer Berlin Heidelberg},
   8976         organization = {Springer Berlin Heidelberg},
   8977         abstract = {In many important applications, a collection of mutually distrustful parties must perform private computation over multisets. Each party's input to the function is his private input multiset. In order to protect these private sets, the players perform privacy-preserving computation; that is, no party learns more information about other parties' private input sets than what can be deduced from the result. In this paper, we propose efficient techniques for privacy-preserving operations on multisets. By building a framework of multiset operations, employing the mathematical properties of polynomials, we design efficient, secure, and composable methods to enable privacy-preserving computation of the union, intersection, and element reduction operations. We apply these techniques to a wide range of practical problems, achieving more efficient results than those of previous work},
   8978         isbn = {978-3-540-28114-6},
   8979         doi = {10.1007/11535218_15},
   8980         url = {http://dx.doi.org/10.1007/11535218_15},
   8981         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreservingSetOperations2005Kissner.pdf},
   8982         author = {Kissner, Lea and Song, Dawn},
   8983         editor = {Shoup, Victor}
   8984 }
   8985 @book {2005_12,
   8986         title = {On Private Scalar Product Computation for Privacy-Preserving Data Mining},
   8987         booktitle = {Information Security and Cryptology -- ICISC 2004},
   8988         series = {Lecture Notes in Computer Science},
   8989         volume = {3506},
   8990         year = {2005},
   8991         pages = {104--120},
   8992         publisher = {Springer Berlin Heidelberg},
   8993         organization = {Springer Berlin Heidelberg},
   8994         abstract = {In mining and integrating data from multiple sources, there are many privacy and security issues. In several different contexts, the security of the full privacy-preserving data mining protocol depends on the security of the underlying private scalar product protocol. We show that two of the private scalar product protocols, one of which was proposed in a leading data mining conference, are insecure. We then describe a provably private scalar product protocol that is based on homomorphic encryption and improve its efficiency so that it can also be used on massive datasets},
   8995         www_section = {Privacy-preserving data mining, private scalar product protocol, vertically partitioned frequent pattern mining},
   8996         isbn = {978-3-540-26226-8},
   8997         doi = {10.1007/11496618_9},
   8998         url = {http://dx.doi.org/10.1007/11496618_9},
   8999         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateScalarProduct2004Goethals.pdf},
   9000         author = {Goethals, Bart and Laur, Sven and Lipmaa, Helger and Mielik{\"a}inen, Taneli},
   9001         editor = {Park, Choon-sik and Chee, Seongtaek}
   9002 }
   9003 @conference {GHPvR05,
   9004         title = {Provable Anonymity},
   9005         booktitle = {Proceedings of the 3rd ACM Workshop on Formal Methods in Security Engineering (FMSE05)},
   9006         year = {2005},
   9007         month = {November},
   9008         address = {Alexandria, VA, USA},
   9009         abstract = {This paper provides a formal framework for the analysis of information hiding properties of anonymous communication protocols in terms of epistemic logic. The key ingredient is our notion of observational equivalence, which is based on the cryptographic structure of messages and relations between otherwise random-looking messages. Two runs are considered observationally equivalent if a spy cannot discover any meaningful distinction between them. We illustrate our approach by proving sender anonymity and unlinkability for two anonymizing protocols, Onion Routing and Crowds. Moreover, we consider a version of Onion Routing in which we inject a subtle error and show how our framework is capable of capturing this flaw},
   9010         www_section = {cryptography, onion routing},
   9011         isbn = {1-59593-231-3},
   9012         doi = {10.1145/1103576.1103585},
   9013         url = {http://portal.acm.org/citation.cfm?id=1103576.1103585},
   9014         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GHPvR05.pdf},
   9015         author = {Flavio D. Garcia and Ichiro Hasuo and Wolter Pieters and Peter van Rossum}
   9016 }
   9017 @conference {ih05-Klonowski,
   9018         title = {Provable Anonymity for Networks of Mixes},
   9019         booktitle = {Proceedings of Information Hiding Workshop (IH 2005)},
   9020         year = {2005},
   9021         month = {June},
   9022         pages = {26--38},
   9023         publisher = {Springer Berlin / Heidelberg},
   9024         organization = {Springer Berlin / Heidelberg},
   9025         abstract = {We analyze networks of mixes used for providing untraceable communication. We consider a network consisting of k mixes working in parallel and exchanging the outputs -- which is the most natural architecture for composing mixes of a certain size into networks able to mix a larger number of inputs at once. We prove that after O(log k) rounds the network considered provides a fair level of privacy protection for any number of messages. No mathematical proof of this kind has been published before. We show that if at least one of the servers is corrupted we need substantially more rounds to meet the same requirements of privacy protection},
   9026         www_section = {anonymity, coupling, Markov chain, rapid mixing},
   9027         isbn = {978-3-540-29039-1},
   9028         doi = {10.1007/11558859},
   9029         url = {http://www.springerlink.com/content/777769630v335773/},
   9030         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-Klonowski.pdf},
   9031         author = {Marek Klonowski and Miroslaw Kutylowski}
   9032 }
   9033 @conference {cramer05pns,
   9034         title = {Proximity Neighbor Selection for a DHT in Wireless Multi-Hop Networks},
   9035         booktitle = {Proceedings of the 5th IEEE International Conference on Peer-to-Peer Computing},
   9036         year = {2005},
   9037         pages = {3--10},
   9038         publisher = {IEEE Computer Society  Washington, DC, USA},
   9039         organization = {IEEE Computer Society  Washington, DC, USA},
   9040         type = {publication},
   9041         address = {Konstanz, Germany},
   9042         abstract = {A mobile ad hoc network (MANET) is a multi-hop wireless network having no infrastructure. Thus, the mobile nodes have to perform basic control tasks, such as routing, and higher-level tasks, such as service discovery, in a cooperative and distributed way. Originally conceived as a peer-to-peer application for the Internet, distributed hash tables (DHTs) are data structures offering both scalable routing and a convenient abstraction for the design of applications in large, dynamic networks. Hence, DHTs and MANETs seem to be a good match, and both have to cope with dynamic, self-organizing networks. DHTs form a virtual control structure oblivious to the underlying network. Several techniques to improve the performance of DHTs in wired networks have been established in the literature. A particularly efficient one is proximity neighbor selection (PNS). PNS has to continuously adapt the virtual network to the physical network, incurring control traffic. The applicability of PNS and DHTs for MANETs commonly is regarded as hard because of this control traffic, the complexity of the adaptation algorithms, and the dynamics of a MANET. Using simulations supported by analytical methods, we show that by making a minor addition to PNS, it is also applicable for MANETs. We additionally show that the specifics of a MANET make PNS an easy exercise there. Thus, DHTs deliver good performance in MANETs},
   9043         www_section = {distributed hash table, multi-hop networks, proximity neighbor selection},
   9044         isbn = {0-7695-2376-5},
   9045         doi = {10.1109/P2P.2005.28},
   9046         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9047         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05pns.pdf},
   9048         author = {Cramer, Curt and Thomas Fuhrmann}
   9049 }
   9050 @conference {sassaman:wpes2005,
   9051         title = {The Pynchon Gate: A Secure Method of Pseudonymous Mail Retrieval},
   9052         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2005)},
   9053         year = {2005},
   9054         month = {November},
   9055         publisher = {ACM  New York, NY, USA},
   9056         organization = {ACM  New York, NY, USA},
   9057         address = {Arlington, VA, USA},
   9058         abstract = {We describe the Pynchon Gate, a practical pseudonymous message retrieval system. Our design uses a simple distributed-trust private information retrieval protocol to prevent adversaries from linking recipients to their pseudonyms, even when some of the infrastructure has been compromised. This approach resists global traffic analysis significantly better than existing deployed pseudonymous email solutions, at the cost of additional bandwidth. We examine security concerns raised by our model, and propose solutions},
   9059         www_section = {private information retrieval, pseudonym},
   9060         isbn = {1-59593-228-3},
   9061         doi = {10.1145/1102199.1102201},
   9062         url = {http://portal.acm.org/citation.cfm?id=1102199.1102201},
   9063         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sassaman-wpes2005.pdf},
   9064         author = {Len Sassaman and Bram Cohen and Nick Mathewson}
   9065 }
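%
% The Pynchon Gate builds on simple distributed-trust (multi-server) private
% information retrieval. The sketch below shows the standard XOR-based scheme such
% designs rely on; the database, record size and function names are invented for
% illustration and are not taken from the paper.
%
% import secrets
%
% DB = [b"record-0", b"record-1", b"record-2", b"record-3"]  # replicated on every server
%
% def xor(a: bytes, b: bytes) -> bytes:
%     return bytes(x ^ y for x, y in zip(a, b))
%
% def make_queries(want: int, n: int, servers: int):
%     """Random bit vectors whose XOR is the unit vector selecting index `want`."""
%     queries = [[secrets.randbelow(2) for _ in range(n)] for _ in range(servers - 1)]
%     last = [(sum(q[i] for q in queries) + (1 if i == want else 0)) % 2 for i in range(n)]
%     return queries + [last]
%
% def server_answer(query):
%     """Each server XORs the records its bit vector selects; the vector alone reveals nothing."""
%     acc = bytes(len(DB[0]))
%     for bit, rec in zip(query, DB):
%         if bit:
%             acc = xor(acc, rec)
%     return acc
%
% answers = [server_answer(q) for q in make_queries(want=2, n=len(DB), servers=3)]
% result = answers[0]
% for a in answers[1:]:
%     result = xor(result, a)
% assert result == DB[2]  # the client learns record 2; privacy holds unless all servers collude
%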
   9066 @conference {1524297,
   9067         title = {Query Forwarding Algorithm Supporting Initiator Anonymity in GNUnet},
   9068         booktitle = {Parallel and Distributed Systems, 2005. Proceedings. 11th International Conference on},
   9069         volume = {2},
   9070         year = {2005},
   9071         month = jul,
   9072         pages = {235--239},
   9073         abstract = {Anonymity in a peer-to-peer network means that it is difficult to associate a particular communication with a sender or a recipient. Recently, an anonymous peer-to-peer framework, called GNUnet, was developed. A primary feature of GNUnet is resistance to traffic analysis. However, Kugler analyzed a routing protocol in GNUnet, and pointed out the traceability of the initiator. In this paper, we propose an alternative routing protocol applicable in GNUnet, which is resistant to Kugler's shortcut attacks},
   9074         www_section = {anonymity, GNUnet, routing, shortcut},
   9075         issn = {1521-9097},
   9076         doi = {10.1109/ICPADS.2005.246},
   9077         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kugler2.pdf},
   9078         author = {Tatara, Kohei and Hori, Y. and Sakurai, Kouichi}
   9079 }
   9080 @article {2005_13,
   9081         title = {A Quick Introduction to Bloom Filters},
   9082         year = {2005},
   9083         month = aug,
   9084         institution = {The GNUnet Project},
   9085         www_section = {Bloom filter, GNUnet},
   9086         journal = {unknown},
   9087         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bloomfilter.pdf},
   9088         author = {Christian Grothoff}
   9089 }
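%
% Since the entry above is an introduction to Bloom filters, a tiny sketch may help;
% the parameters m and k below are arbitrary and the class is illustrative, not the
% GNUnet implementation.
%
% import hashlib
%
% class BloomFilter:
%     """k hash positions in an m-bit array: false positives possible, false negatives not."""
%     def __init__(self, m: int = 1024, k: int = 4):
%         self.m, self.k = m, k
%         self.bits = [0] * m
%
%     def _positions(self, item: bytes):
%         for i in range(self.k):
%             digest = hashlib.sha256(bytes([i]) + item).digest()
%             yield int.from_bytes(digest[:8], "big") % self.m
%
%     def add(self, item: bytes):
%         for p in self._positions(item):
%             self.bits[p] = 1
%
%     def __contains__(self, item: bytes) -> bool:
%         return all(self.bits[p] for p in self._positions(item))
%
% bf = BloomFilter()
% bf.add(b"gnunet")
% assert b"gnunet" in bf       # never a false negative
% print(b"tor" in bf)          # usually False; True only on a (rare) false positive
%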
   9090 @conference {HanLLHP05,
   9091         title = {A Random Walk Based Anonymous Peer-to-Peer Protocol Design},
   9092         booktitle = {Proceedings of ICCNMC},
   9093         year = {2005},
   9094         pages = {143--152},
   9095         publisher = {Springer Berlin / Heidelberg},
   9096         organization = {Springer Berlin / Heidelberg},
   9097         abstract = {Anonymity has been one of the most challenging issues in ad-hoc environments such as P2P systems. In this paper, we propose an anonymous protocol called Random Walk based Anonymous Protocol (RWAP) for decentralized P2P systems. We evaluate RWAP by comprehensive trace driven simulations. Results show that RWAP significantly reduces traffic cost and encryption overhead compared with existing approaches},
   9098         www_section = {anonymity, P2P, RWAP},
   9099         isbn = {978-3-540-28102-3},
   9100         doi = {10.1007/11534310},
   9101         url = {http://www.springerlink.com/content/0642hvq80b27vv1f/},
   9102         author = {Jinsong Han and Yunhao Liu and Li Lu and Lei Hu and Abhishek Patil}
   9103 }
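%
% The RWAP entry above uses random-walk forwarding to decouple a request from its
% initiator. The fragment below is a generic illustration of that idea (a walk that
% continues with probability p, in the spirit of Crowds-style forwarding); the toy
% topology and parameters are invented and do not describe RWAP itself.
%
% import random
%
% neighbors = {                              # invented overlay adjacency list
%     "A": ["B", "C"], "B": ["A", "D"], "C": ["A", "D", "E"],
%     "D": ["B", "C", "E"], "E": ["C", "D"],
% }
% P_FORWARD = 0.7                            # probability of extending the walk
%
% def random_walk(initiator, rng=random.Random(42)):
%     """Hand the request from node to node at random; the node where the walk stops
%     submits it, so an observer cannot tell which node on the path originated it."""
%     path = [initiator]
%     while rng.random() < P_FORWARD:
%         path.append(rng.choice(neighbors[path[-1]]))
%     return path
%
% print(random_walk("A"))
%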
   9104 @article { le2005,
   9105         title = {Reading File Metadata with extract and libextractor},
   9106         journal = {Linux Journal},
   9107         volume = {6-2005},
   9108         year = {2005},
   9109         month = {June},
   9110         publisher = {SCC},
   9111         www_section = {GNUnet, keywords, libextractor, metadata, search},
   9112         url = {http://www.linuxjournal.com/article/7552},
   9113         author = {Christian Grothoff}
   9114 }
   9115 @mastersthesis {Aspelund05retrivabilityof,
   9116         title = {Retrivability of data in ad-hoc backup},
   9117         volume = {Master},
   9118         year = {2005},
   9119         school = {Oslo University},
   9120         type = {Master thesis},
   9121         abstract = {This master thesis looks at aspects of backup and restore of data in ad-hoc networks. Ad-hoc networks are networks made between arbitrary nodes without any form of infrastructure or central control. Backup in such environments would have to rely on other nodes to keep backups. The key problem is knowing whom to trust. Backup in an ad-hoc network is meant to be a method to offer extra security to data that is created outside of a controlled environment. The most important aspect of backup is the ability to retrieve data after it is lost from the original device. In this project an ad-hoc network is simulated, to measure how much of the data can be retrieved as a function of the size of the network. The distance to the data and how many of the distributed copies are available are measured. The network is simulated using User-mode Linux and the centrality and connectivity of the simulated network are measured. Finding the device that keeps your data when a restoration is needed can be like looking for a needle in a haystack. A simple solution to this is to not only rely on the ad-hoc network but also make it possible for devices that keep backups to upload data to others or back to a host that is available to the source itself},
   9122         www_section = {ad-hoc networks},
   9123         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.106.141},
   9124         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Retrivability_of_data_in_ad-hoc_backup.pdf},
   9125         author = {Trond Aspelund}
   9126 }
   9127 @booklet {1698181,
   9128         title = {Routing with Byzantine robustness},
   9129         year = {2005},
   9130         publisher = {Sun Microsystems, Inc},
   9131         address = {Mountain View, CA, USA},
   9132         abstract = {This paper describes how a network can continue to function in the presence of Byzantine failures. A Byzantine failure is one in which a node, instead of halting (as it would in a fail-stop failure), continues to operate, but incorrectly. It might lie about routing information, perform the routing algorithm itself flawlessly, but then fail to forward some class of packets correctly, or flood the network with garbage traffic. Our goal is to design a network so that as long as one nonfaulty path connects nonfaulty nodes A and B, they will be able to communicate, with some fair share of bandwidth, even if all the other components in the network are maximally malicious. We review work from 1988 that presented a network design that had that property, but required the network to be small enough so that every router could keep state proportional to $n^2$, where n is the total number of nodes in the network. This would work for a network of size on the order of a thousand nodes, but to build a large network, we need to introduce hierarchy. This paper presents a new design, building on the original work, that works with hierarchical networks. This design not only defends against malicious routers, but because it guarantees fair allocation of resources, can mitigate against many other types of denial of service attacks},
   9133         www_section = {routing},
   9134         url = {http://portal.acm.org/citation.cfm?id=1698181$\#$},
   9135         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/smli_tr-2005-146.pdf},
   9136         author = {Perlman, Radia}
   9137 }
   9138 @conference {XuFZBCZ05,
   9139         title = {SAS: A Scalar Anonymous Communication System},
   9140         booktitle = {Proceedings of ICCNMC},
   9141         year = {2005},
   9142         pages = {452--461},
   9143         publisher = {Springer Berlin / Heidelberg},
   9144         organization = {Springer Berlin / Heidelberg},
   9145         abstract = {Anonymity technologies have gained more and more attention for communication privacy. In general, users obtain anonymity at a certain cost in an anonymous communication system, which uses rerouting to increase the system's robustness. However, a long rerouting path incurs large overhead and decreases the quality of service (QoS). In this paper, we propose the Scalar Anonymity System (SAS) in order to provide a tradeoff between anonymity and cost for different users with different requirements. In SAS, by selecting the level of anonymity, a user obtains the corresponding anonymity and QoS and also sustains the corresponding load of traffic rerouting for other users. Our theoretical analysis and simulation experiments verify the effectiveness of SAS},
   9146         www_section = {anonymity, privacy, QoS},
   9147         isbn = {978-3-540-28102-3},
   9148         doi = {10.1007/11534310},
   9149         url = {http://www.springerlink.com/content/9b2k6u5wval6cep1/},
   9150         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.66.7970.pdf},
   9151         author = {Hongyun Xu and Xinwen Fu and Ye Zhu and Riccardo Bettati and Jianer Chen and Wei Zhao}
   9152 }
   9153 @conference {Fuhrmann05scalablerouting,
   9154         title = {Scalable routing for networked sensors and actuators},
   9155         booktitle = {Proceedings of the Second Annual IEEE Communications Society Conference on Sensor and Ad Hoc Communications and Networks},
   9156         year = {2005},
   9157         abstract = {The design of efficient routing protocols for ad hoc and sensor networks is challenging for several reasons: Physical network topology is random. Nodes have limited computation and memory capabilities. Energy and bisection bandwidth are scarce. Furthermore, in most settings, the lack of centralized components leaves all network control tasks to the nodes acting as decentralized peers. In this paper, we present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in large random networks. A guiding example is a community of 'digital homes' where smart sensors and actuators are installed by laypersons. Such networks combine wireless ad-hoc and infrastructure networks, and lack a well-crafted network topology. Typically, the nodes do not have sufficient processing and memory resources to perform sophisticated routing algorithms. Flooding on the other hand is too bandwidth-consuming in the envisaged large-scale networks. SSR is a fully self-organizing routing protocol for such scenarios. It creates a virtual ring that links all nodes via predecessor/successor source routes. Additionally, each node possesses O(log N) short-cut source routes to nodes in exponentially increasing virtual ring distance. Like with the Chord overlay network, this ensures full connectivity within the network. Moreover, it provides a routing semantic which can efficiently support indirection schemes like i3. Memory and message efficiency are achieved by the introduction of a route cache together with a set of path manipulation rules that allow it to produce near-to-optimal paths},
   9158         www_section = {scalable source routing, sensor networks, wireless sensor network},
   9159         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.6509},
   9160         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.67.6509.pdf},
   9161         author = {Thomas Fuhrmann}
   9162 }
   9163 @conference {1049775,
   9164         title = {Scalable Service Discovery for MANET},
   9165         booktitle = {PERCOM '05: Proceedings of the Third IEEE International Conference on Pervasive Computing and Communications},
   9166         year = {2005},
   9167         pages = {235--244},
   9168         publisher = {IEEE Computer Society},
   9169         organization = {IEEE Computer Society},
   9170         address = {Washington, DC, USA},
   9171         abstract = {Mobile Ad hoc NETworks (MANETs) conveniently complement infrastructure-based networks, allowing mobile nodes to spontaneously form a network and share their services, including bridging with other networks, either infrastructure-based or ad hoc. However, distributed service provisioning over MANETs requires adequate support for service discovery and invocation, due to the network's dynamics and resource constraints of wireless nodes. While a number of existing service discovery protocols have shown to be effective for the wireless environment, these are mainly aimed at infrastructure-based and/or 1-hop ad hoc wireless networks. Some discovery protocols for MANETs have been proposed over the last couple of years but they induce significant traffic overhead, and are thus primarily suited for small-scale MANETs with few nodes. Building upon the evaluation of existing protocols, we introduce a scalable service discovery protocol for MANETs, which is based on the homogeneous and dynamic deployment of cooperating directories within the network. Scalability of our protocol comes from the minimization of the generated traffic, and the use of compact directory summaries that make it possible to efficiently locate the directory that most likely caches the description of a given service},
   9172         www_section = {ad-hoc networks, mobile Ad-hoc networks},
   9173         isbn = {0-7695-2299-8},
   9174         doi = {10.1109/PERCOM.2005.36},
   9175         url = {http://portal.acm.org/citation.cfm?id=1049775$\#$},
   9176         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.73.7247.pdf},
   9177         author = {Sailhan, Francoise and Valerie Issarny}
   9178 }
   9179 @booklet {Sandberg05searchingin,
   9180         title = {Searching in a Small World},
   9181         year = {2005},
   9182         abstract = {The small-world phenomenon, that the world's social network is tightly connected, and that any two people can be linked by a short chain of friends, has long been a subject of interest. Famously, the psychologist Stanley Milgram performed an experiment where he asked people to deliver a letter to a stranger by forwarding it to an acquaintance, who could forward it to one of his acquaintances, and so on until the destination was reached. The results seemed to confirm that the small-world phenomenon is real. Recently it has been shown by Jon Kleinberg that in order to search in a network, that is to actually find the short paths in the manner of the Milgram experiment, a very special type of a graph model is needed. In this thesis, we present two ideas about searching in the small world stemming from Kleinberg's results. In the first we study the formation of networks of this type, attempting to see why the kind},
   9183         www_section = {small-world},
   9184         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.688},
   9185         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.101.688.pdf},
   9186         author = {Sandberg, Oskar}
   9187 }
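%
% The thesis above builds on Kleinberg's result that greedy routing only finds short
% paths when long-range links follow a particular distance distribution. The sketch
% below sets up a small Kleinberg-style grid (one shortcut per node, chosen with
% probability proportional to d^-2) and routes greedily; the grid size and all names
% are illustrative choices, not taken from the thesis.
%
% import random
%
% N = 20                                        # N x N grid
% nodes = [(x, y) for x in range(N) for y in range(N)]
%
% def dist(a, b):                               # lattice (Manhattan) distance
%     return abs(a[0] - b[0]) + abs(a[1] - b[1])
%
% def long_range_contact(u):
%     """One shortcut per node, chosen with probability proportional to dist(u, v)^-2."""
%     others = [v for v in nodes if v != u]
%     weights = [dist(u, v) ** -2 for v in others]
%     return random.choices(others, weights=weights, k=1)[0]
%
% contacts = {u: long_range_contact(u) for u in nodes}
%
% def greedy_route(src, dst):
%     """Always forward to the known neighbour (lattice link or shortcut) closest to dst."""
%     hops, cur = 0, src
%     while cur != dst:
%         x, y = cur
%         options = [(x + dx, y + dy) for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
%                    if (x + dx, y + dy) in contacts]
%         options.append(contacts[cur])
%         cur = min(options, key=lambda v: dist(v, dst))
%         hops += 1
%     return hops
%
% print(greedy_route((0, 0), (N - 1, N - 1)))   # typically far fewer than the 38 lattice hops
%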
   9188 @book {conf/p2p/GotzRW05,
   9189         title = {Selected DHT Algorithms},
   9190         booktitle = {Peer-to-Peer Systems and Applications},
   9191         series = {Lecture Notes in Computer Science},
   9192         volume = {3485},
   9193         year = {2005},
   9194         pages = {95--117},
   9195         publisher = {Springer},
   9196         organization = {Springer},
   9197         chapter = {8},
   9198         abstract = {Several different approaches to realizing the basic principles of DHTs have emerged over the last few years. Although they rely on the same fundamental idea, there is a large diversity of methods for both organizing the identifier space and performing routing. The particular properties of each approach can thus be exploited by specific application scenarios and requirements.
   9199 This overview focuses on the three DHT systems that have received the most attention in the research community: Chord, Pastry, and Content Addressable Networks (CAN). Furthermore, the systems Symphony, Viceroy, and Kademlia are discussed because they exhibit interesting mechanisms and properties beyond those of the first three systems},
   9200         www_section = {CAN, Chord, Content Addressable Networks, dblp, distributed hash table, Kademlia, Pastry, Symphony, Viceroy},
   9201         isbn = {3-540-29192-X},
   9202         url = {http://dblp.uni-trier.de/db/conf/p2p/p2p2005lncs.html$\#$GotzRW05},
   9203         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Lecture\%20Notes\%20in\%20Computer\%20Science\%20-\%20Selected\%20DHT\%20Algorithms\%20.pdf},
   9204         author = {G{\"o}tz, Stefan and Rieche, Simon and Klaus Wehrle}
   9205 }
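%
% The chapter above surveys Chord, Pastry and CAN. As a concrete reminder of how
% Chord's finger tables work, here is a small self-contained simulation; the ring
% size, node identifiers and helper names are arbitrary choices for illustration.
%
% M = 8                                   # identifier space of size 2^M = 256
% SPACE = 2 ** M
% node_ids = sorted([3, 18, 40, 77, 101, 150, 200, 241])
%
% def successor(k):
%     """First node at or clockwise after key k on the ring."""
%     k %= SPACE
%     return next((n for n in node_ids if n >= k), node_ids[0])
%
% def in_interval(x, a, b):               # x in (a, b] on the ring
%     return (a < x <= b) if a < b else (x > a or x <= b)
%
% def in_open(x, a, b):                   # x in (a, b) on the ring
%     return (a < x < b) if a < b else (x > a or x < b)
%
% # finger[i] = successor(n + 2^i): only O(log N) routing state per node
% fingers = {n: [successor(n + 2 ** i) for i in range(M)] for n in node_ids}
%
% def lookup(start, key, max_hops=32):
%     """Greedy finger routing: each hop roughly halves the distance to the key."""
%     cur, hops = start, 0
%     while hops < max_hops:
%         succ = successor(cur + 1)       # cur's immediate successor
%         if in_interval(key, cur, succ):
%             return succ, hops + 1       # succ is responsible for the key
%         cur = next((f for f in reversed(fingers[cur]) if in_open(f, cur, key)), succ)
%         hops += 1
%     raise RuntimeError("lookup did not converge")
%
% print(lookup(3, 230))                   # -> (241, 3): node 241 holds key 230, found in 3 hops
%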
   9206 @conference {Gairing:2005:SRI:1073970.1074000,
   9207         title = {Selfish Routing with Incomplete Information},
   9208         booktitle = {SPAA'05. Proceedings of the 17th Annual ACM Symposium on Parallelism in Algorithms and Architectures},
   9209         series = {SPAA '05},
   9210         year = {2005},
   9211         month = jul,
   9212         pages = {203--212},
   9213         publisher = {ACM},
   9214         organization = {ACM},
   9215         address = {Las Vegas, Nevada},
   9216         abstract = {In his seminal work, Harsanyi introduced an elegant approach to study non-cooperative games with incomplete information where the players are uncertain about some parameters. To model such games he introduced the Harsanyi transformation, which converts a game with incomplete information to a strategic game where players may have different types. In the resulting Bayesian game players' uncertainty about each other's types is described by a probability distribution over all possible type profiles. In this work, we introduce a particular selfish routing game with incomplete information that we call Bayesian routing game. Here, n selfish users wish to assign their traffic to one of m links. Users do not know each other's traffic. Following Harsanyi's approach, we introduce for each user a set of possible types. This paper presents a comprehensive collection of results for the Bayesian routing game. We prove, with help of a potential function, that every Bayesian routing game possesses a pure Bayesian Nash equilibrium. For the model of identical links and independent type distribution we give a polynomial time algorithm to compute a pure Bayesian Nash equilibrium. We study structural properties of fully mixed Bayesian Nash equilibria for the model of identical links and show that they maximize individual cost. In general there exists more than one fully mixed Bayesian Nash equilibrium. We characterize the class of fully mixed Bayesian Nash equilibria in the case of independent type distribution. We conclude with results on coordination ratio for the model of identical links for three social cost measures, that is, social cost as expected maximum congestion, sum of individual costs and maximum individual cost. For the latter two we are able to give (asymptotic) tight bounds using our results on fully mixed Bayesian Nash equilibria. To the best of our knowledge this is the first time that mixed Bayesian Nash equilibria have been studied in conjunction with social cost},
   9217         www_section = {bayesian game, coordination ratio, incomplete information, nash equilibria, selfish routing},
   9218         isbn = {1-58113-986-1},
   9219         doi = {http://doi.acm.org/10.1145/1073970.1074000},
   9220         url = {http://doi.acm.org/10.1145/1073970.1074000},
   9221         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SPAA\%2705\%20-\%20Selfish\%20routing\%20with\%20incomplete\%20information.pdf},
   9222         author = {Gairing, Martin and Monien, Burkhard and Tiemann, Karsten}
   9223 }
   9224 @conference {kutzner05dvdr,
   9225         title = {A Self-Organizing Job Scheduling Algorithm for a Distributed VDR},
   9226         booktitle = {Workshop "Peer-to-Peer-Systeme und -Anwendungen", 14. Fachtagung Kommunikation in Verteilten Systemen (KiVS 2005)},
   9227         year = {2005},
   9228         type = {publication},
   9229         address = {Kaiserslautern, Germany},
   9230         abstract = {In [CKF04], we have reported on our concept of a peer-to-peer extension to the popular video disk recorder (VDR) [Sch04], the Distributed Video Disk Recording (DVDR) system. The DVDR is a collaboration system of existing video disk recorders via a peer-to-peer network. There, the VDRs communicate about the tasks to be done and distribute the recordings afterwards. In this paper, we report on lessons learnt during its implementation and explain the considerations leading to the design of a new job scheduling algorithm. DVDR is an application which is based on a distributed hash table (DHT) employing proximity route selection (PRS)/proximity neighbor selection (PNS). For our implementation, we chose to use Chord [SMK + 01, GGG + 03]. Using a DHT with PRS/PNS yields two important features: (1) Each hashed key is routed to exactly one destination node within the system. (2) PRS/PNS forces messages originating in one region of the network destined to the same key to be routed through exactly one node in that region (route convergence). The first property enables per-key aggregation trees with a tree being rooted at the node which is responsible for the respective key. This node serves as a rendezvous point. The second property leads to locality (i.e., low latency) in this aggregation tree},
   9231         www_section = {Chord, distributed hash table, proximity neighbor selection},
   9232         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9233         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05dvdr.pdf},
   9234         author = {Kendy Kutzner and Cramer, Curt and Thomas Fuhrmann}
   9235 }
   9236 @conference {fuhrmann05networking,
   9237         title = {A Self-Organizing Routing Scheme for Random Networks},
   9238         booktitle = {Proceedings of the 4th IFIP-TC6 Networking Conference},
   9239         year = {2005},
   9240         pages = {1366--1370},
   9241         publisher = {Springer Berlin / Heidelberg},
   9242         organization = {Springer Berlin / Heidelberg},
   9243         type = {publication},
   9244         address = {Waterloo, Canada},
   9245         abstract = {Most routing protocols employ address aggregation to achieve scalability with respect to routing table size. But often, as networks grow in size and complexity, address aggregation fails. Other networks, e.g. sensor-actuator networks or ad-hoc networks, that are characterized by organic growth might not at all follow the classical hierarchical structures that are required for aggregation.
   9246 In this paper, we present a fully self-organizing routing scheme that is able to efficiently route messages in random networks with randomly assigned node addresses. The protocol combines peer-to-peer techniques with source routing and can be implemented to work with very limited resource demands. With the help of simulations we show that it nevertheless quickly converges into a globally consistent state and achieves a routing stretch of only 1.2 -- 1.3 in a network with more than $10^5$ randomly assigned nodes},
   9247         www_section = {ad-hoc networks, P2P, self-organization},
   9248         isbn = {978-3-540-25809-4},
   9249         doi = {10.1007/b136094},
   9250         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9251         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann05networking.pdf},
   9252         author = {Thomas Fuhrmann}
   9253 }
   9254 @conference {Irwin:2005:SVC:1080192.1080194,
   9255         title = {Self-recharging virtual currency},
   9256         booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems},
   9257         series = {P2PECON '05},
   9258         year = {2005},
   9259         month = aug,
   9260         pages = {93--98},
   9261         publisher = {ACM},
   9262         organization = {ACM},
   9263         address = {Philadelphia, Pennsylvania, USA},
   9264         abstract = {Market-based control is attractive for networked computing utilities in which consumers compete for shared resources (computers, storage, network bandwidth). This paper proposes a new self-recharging virtual currency model as a common medium of exchange in a computational market. The key idea is to recycle currency through the economy automatically while bounding the rate of spending by consumers. Currency budgets may be distributed among consumers according to any global policy; consumers spend their budgets to schedule their resource usage through time, but cannot hoard their currency or starve. We outline the design and rationale for self-recharging currency in Cereus, a system for market-based community resource sharing, in which participants are authenticated and sanctions are sufficient to discourage fraudulent behavior. Currency transactions in Cereus are accountable: offline third-party audits can detect and prove cheating, so participants may transfer and recharge currency autonomously without involvement of the trusted banking service},
   9265         www_section = {market, virtual currency},
   9266         isbn = {1-59593-026-4},
   9267         doi = {http://doi.acm.org/10.1145/1080192.1080194},
   9268         url = {http://doi.acm.org/10.1145/1080192.1080194},
   9269         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Self-recharging\%20virtual\%20currency.pdf},
   9270         author = {Irwin, David and Chase, Jeff and Grit, Laura and Yumerefendi, Aydan}
   9271 }
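%
% The entry above recycles currency by letting spent credits return to the spender
% automatically, which bounds the spending rate without allowing hoarding. The class
% below is only a toy model of that idea; the recharge rule and names are assumptions
% for illustration, not the Cereus design.
%
% import heapq
%
% class RechargingWallet:
%     """Budget of `capacity` credits; each spent credit returns `recharge_time`
%     ticks later, so spending is rate-limited but credits cannot be hoarded."""
%     def __init__(self, capacity, recharge_time):
%         self.capacity = capacity
%         self.recharge_time = recharge_time
%         self.pending = []                    # min-heap of (return_time, amount)
%
%     def available(self, now):
%         while self.pending and self.pending[0][0] <= now:
%             heapq.heappop(self.pending)      # this credit has recharged
%         return self.capacity - sum(amount for _, amount in self.pending)
%
%     def spend(self, amount, now):
%         if amount > self.available(now):
%             return False                     # would exceed the bounded spending rate
%         heapq.heappush(self.pending, (now + self.recharge_time, amount))
%         return True
%
% w = RechargingWallet(capacity=10, recharge_time=5)
% print(w.spend(8, now=0), w.spend(8, now=1), w.spend(8, now=6))   # True False True
%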
   9272 @conference {cramer05selfstabilizing,
   9273         title = {Self-Stabilizing Ring Networks on Connected Graphs},
   9274         booktitle = {Self-Stabilizing Ring Networks on Connected Graphs},
   9275         year = {2005},
   9276         type = {Technical Report},
   9277         address = {University of Karlsruhe (TH), Fakultaet fuer Informatik, Technical Report 2005-5},
   9278         abstract = {Large networks require scalable routing. Traditionally, protocol overhead is reduced by introducing a hierarchy. This requires aggregation of nearby nodes under a common address prefix. In fixed networks, this is achieved administratively, whereas in wireless ad-hoc networks, dynamic assignments of nodes to aggregation units are required. As a result of the nodes commonly being assigned a random network address, the majority of proposed ad-hoc routing protocols discovers routes between end nodes by flooding, thus limiting the network size. Peer-to-peer (P2P) overlay networks offer scalable routing solutions by employing virtualized address spaces, yet assume an underlying routing protocol for end-to-end connectivity. We investigate a cross-layer approach to P2P routing, where the virtual address space is implemented with a network-layer routing protocol by itself. The Iterative Successor Pointer Rewiring Protocol (ISPRP) efficiently initializes a ring-structured network among nodes having but link-layer connectivity. It is fully self-organizing and issues only a small per-node number of messages by keeping interactions between nodes as local as possible. The main contribution of this paper is a proof that ISPRP is self-stabilizing, that is, starting from an arbitrary initial state, the protocol lets the network converge into a correct state within a bounded amount of time},
   9279         www_section = {ad-hoc networks, P2P},
   9280         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9281         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05selfstabilizing.pdf},
   9282         author = {Cramer, Curt and Thomas Fuhrmann}
   9283 }
   9284 @article {10.1109/PERSER.2005.1506410,
   9285         title = {Service discovery using volunteer nodes for pervasive environments},
   9286         journal = {International Conference on Pervasive Services},
   9287         year = {2005},
   9288         pages = {188--197},
   9289         publisher = {IEEE Computer Society},
   9290         address = {Los Alamitos, CA, USA},
   9291         abstract = {We propose a service discovery architecture called VSD (service discovery based on volunteers) for heterogeneous and dynamic pervasive computing environments. The proposed architecture uses a small subset of the nodes called volunteers that perform directory services. Relatively stable and capable nodes serve as volunteers, thus recognizing node heterogeneity in terms of mobility and capability. We discuss characteristics of the VSD architecture and methods to improve connectivity among volunteers for a higher discovery rate. By showing that VSD performs quite well compared to a broadcast-based scheme in MANET scenarios, we validate that VSD is a flexible and adaptable architecture appropriate for dynamic pervasive computing environments. VSD incorporates several novel features: i) handles dynamism and supports self-reconfiguration; ii) provides physical locality and scalability; and iii) improves reliability and copes with uncertainty through redundancy by forming overlapped clusters},
   9292         isbn = {0-7803-9032-6},
   9293         doi = {10.1109/PERSER.2005.1506410},
   9294         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/PERSER.2005.1506410},
   9295         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/31.pdf},
   9296         author = {Mijeom Kim and Mohan Kumar and Behrooz Shirazi}
   9297 }
   9298 @book {Bartolini:2005:SFA:2167504.2167521,
   9299         title = {A software framework for automated negotiation},
   9300         booktitle = {Software Engineering for Multi-Agent Systems III},
   9301         series = {Lecture Notes in Computer Science},
   9302         volume = {3390},
   9303         year = {2005},
   9304         pages = {213--235},
   9305         publisher = {Springer-Verlag},
   9306         organization = {Springer-Verlag},
   9307         chapter = {A software framework for automated negotiation},
   9308         address = {Berlin, Heidelberg},
   9309         abstract = {If agents are to negotiate automatically with one another they must share a negotiation mechanism, specifying what possible actions each party can take at any given time, when negotiation terminates, and what is the structure of the resulting agreements. Current standardization activities such as FIPA [2] and WS-Agreement [3] represent this as a negotiation protocol specifying the flow of messages. However, they omit other aspects of the rules of negotiation (such as obliging a participant to improve on a previous offer), requiring these to be represented implicitly in an agent's design, potentially resulting in compatibility, maintenance and re-usability problems. In this chapter, we propose an alternative approach, allowing all of a mechanism to be formal and explicit. We present (i) a taxonomy of declarative rules which can be used to capture a wide variety of negotiation mechanisms in a principled and well-structured way; (ii) a simple interaction protocol, which is able to support any mechanism which can be captured using the declarative rules; (iii) a software framework for negotiation that allows agents to effectively participate in negotiations defined using our rule taxonomy and protocol and (iv) a language for expressing aspects of the negotiation based on OWL-Lite [4]. We provide examples of some of the mechanisms that the framework can support},
   9310         www_section = {framework, negotiation},
   9311         isbn = {3-540-24843-9},
   9312         url = {http://dl.acm.org/citation.cfm?id=2167504.2167521},
   9313         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SELMAS\%20-\%20Software\%20framework\%20for\%20automated\%20negotiation.pdf},
   9314         author = {Bartolini, Claudio and Preist, Chris and Nicholas R Jennings},
   9315         editor = {Choren, Ricardo and Garcia, Alessandro and Lucena, Carlos and Romanovsky, Alexander}
   9316 }
   9317 @conference {Bharambe:2005:OBP:1064212.1064273,
   9318         title = {Some observations on BitTorrent performance},
   9319         booktitle = {Proceedings of the 2005 ACM SIGMETRICS International Conference on Measurement and Modeling of Computer Systems},
   9320         series = {SIGMETRICS '05},
   9321         year = {2005},
   9322         month = jun,
   9323         pages = {398--399},
   9324         publisher = {ACM},
   9325         organization = {ACM},
   9326         address = {New York, NY, USA},
   9327         abstract = {In this paper, we present a simulation-based study of BitTorrent. Our results confirm that BitTorrent performs near-optimally in terms of uplink bandwidth utilization and download time, except under certain extreme conditions. On fairness, however, our work shows that low bandwidth peers systematically download more than they upload to the network when high bandwidth peers are present. We find that the rate-based tit-for-tat policy is not effective in preventing unfairness. We show how simple changes to the tracker and a stricter, block-based tit-for-tat policy greatly improve fairness, while maintaining high utilization},
   9328         www_section = {bandwidth utilization, BitTorrent, fairness},
   9329         isbn = {1-59593-022-1},
   9330         doi = {http://doi.acm.org/10.1145/1064212.1064273},
   9331         url = {http://doi.acm.org/10.1145/1064212.1064273},
   9332         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGMETRICS\%2705\%20-\%20Bharambe\%2C\%20Herley\%20\%26\%20Padmanabhan.pdf},
   9333         author = {Bharambe, Ashwin R. and Herley, Cormac and Venkata N. Padmanabhan}
   9334 }
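%
% The entry above contrasts BitTorrent's rate-based tit-for-tat with a stricter,
% block-based policy. The fragment below sketches what such a block-based rule could
% look like; DELTA and the function names are illustrative assumptions, not the
% paper's or BitTorrent's actual code.
%
% from collections import defaultdict
%
% DELTA = 2                      # allowed imbalance, in blocks
% sent = defaultdict(int)        # blocks we uploaded to each peer
% received = defaultdict(int)    # blocks each peer uploaded to us
%
% def may_upload_to(peer):
%     """Block-based tit-for-tat: keep serving a peer only while the blocks we sent it
%     do not exceed the blocks it sent us by more than DELTA."""
%     return sent[peer] - received[peer] < DELTA
%
% def upload_block(peer):
%     if may_upload_to(peer):
%         sent[peer] += 1
%         return True
%     return False               # choke until the peer reciprocates
%
% received["leecher"] = 1
% print([upload_block("leecher") for _ in range(5)])   # [True, True, True, False, False]
%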
   9335 @conference {LuFSG05,
   9336         title = {Some Remarks on Universal Re-encryption and A Novel Practical Anonymous Tunnel},
   9337         booktitle = {Proceedings of ICCNMC},
   9338         year = {2005},
   9339         pages = {853--862},
   9340         abstract = {In 2004 Golle, Jakobsson, Juels and Syverson presented a new encryption scheme called the universal re-encryption [GJJS04] for mixnets [Cha81] which was extended by Gomulkiewicz et al. [GKK04]. We discover that this scheme and its extension both are insecure against a chosen ciphertext attack proposed by Pfitzmann in 1994 [Pfi94]. Another drawback of them is low efficiency for anonymous communications due to their long ciphertexts, i.e., four times the size of the plaintext. Accordingly, we devise a novel universal and efficient anonymous tunnel, rWonGoo, for circuit-based low-latency communications in large-scale peer-to-peer environments to dramatically decrease the possibility of suffering from the attack [Pfi94]. The basic idea behind rWonGoo is to provide anonymity with re-encryption and random forwarding, obtaining practicality, correctness and efficiency in encryption in a way that differs from the layered encryption systems [Cha81], for which it can be difficult to achieve correctness of tunnels},
   9341         isbn = {978-3-540-28102-3},
   9342         doi = {10.1007/11534310},
   9343         url = {http://www.springerlink.com/content/b3x4na87xbmcextx/},
   9344         author = {Tianbo Lu and Bin-Xing Fang and Yuzhong Sun and Li Guo}
   9345 }
   9346 @article {Lua05asurvey,
   9347         title = {A Survey and Comparison of Peer-to-Peer Overlay Network Schemes},
   9348         journal = {IEEE Communications Surveys and Tutorials},
   9349         volume = {7},
   9350         year = {2005},
   9351         pages = {72--93},
   9352         abstract = {Over the Internet today, computing and communications environments are significantly more complex and chaotic than classical distributed systems, lacking any centralized organization or hierarchical control. There has been much interest in emerging Peer-to-Peer (P2P) network overlays because they provide a good substrate for creating large-scale data sharing, content distribution and application-level multicast applications. These P2P networks try to provide a long list of features such as: selection of nearby peers, redundant storage, efficient search/location of data items, data permanence or guarantees, hierarchical naming, trust and authentication, and, anonymity. P2P networks potentially offer an efficient routing architecture that is self-organizing, massively scalable, and robust in the wide-area, combining fault tolerance, load balancing and explicit notion of locality. In this paper, we present a survey and comparison of various Structured and Unstructured P2P networks. We categorize the various schemes into these two groups in the design spectrum and discuss the application-level network performance of each group},
   9353         www_section = {overlay, peer-to-peer networking},
   9354         issn = {1553-877X},
   9355         doi = {10.1109/COMST.2005.1610546},
   9356         url = {http://www.slideshare.net/networkingcentral/a-survey-and-comparison-of-peertopeer-overlay-network-schemes},
   9357         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20CST\%20-\%20A\%20Survey\%20and\%20Comparison\%20of\%20Peer-to-Peer\%20Overlay.pdf},
   9358         author = {Eng Keong Lua and Jon Crowcroft and Marcelo Pias and Ravi Sharma and Steven Lim}
   9359 }
   9360 @conference {Cheng:2005:SRM:1080192.1080202,
   9361         title = {Sybilproof reputation mechanisms},
   9362         booktitle = {Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems},
   9363         series = {P2PECON '05},
   9364         year = {2005},
   9365         month = aug,
   9366         pages = {128--132},
   9367         publisher = {ACM},
   9368         organization = {ACM},
   9369         address = {Philadelphia, PA},
   9370         abstract = {Due to the open, anonymous nature of many P2P networks, new identities--or sybils--may be created cheaply and in large numbers. Given a reputation system, a peer may attempt to falsely raise its reputation by creating fake links between its sybils. Many existing reputation mechanisms are not resistant to these types of strategies. Using a static graph formulation of reputation, we attempt to formalize the notion of sybilproofness. We show that there is no symmetric sybilproof reputation function. For nonsymmetric reputations, following the notion of reputation propagation along paths, we give a general asymmetric reputation function based on flow and give conditions for sybilproofness},
   9371         www_section = {peer-to-peer networking, reputation, Sybil attack},
   9372         isbn = {1-59593-026-4},
   9373         doi = {10.1145/1080192.1080202},
   9374         url = {http://doi.acm.org/10.1145/1080192.1080202},
   9375         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20SIGCOMM\%2705\%20-\%20Cheng\%20\%26\%20Friedman\%20-\%20Sybilproof\%20reputation\%20mechanisms.pdf},
   9376         author = {Cheng, Alice and Eric Friedman}
   9377 }
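% Editor's note on Cheng:2005:SRM:1080192.1080202: the Python sketch below only
% illustrates the general idea named in the abstract above (reputation propagated
% along paths and bounded by flow from a trusted seed); it is not the authors'
% exact construction, and the graph, capacities and function names are invented.
%
% import networkx as nx
%
% def flow_reputation(trust_edges, seed):
%     """Score every node by the maximum flow it can receive from a trusted seed.
%     trust_edges: iterable of (truster, trustee, capacity) triples."""
%     g = nx.DiGraph()
%     for u, v, c in trust_edges:
%         g.add_edge(u, v, capacity=c)
%     return {n: nx.maximum_flow_value(g, seed, n) for n in g.nodes if n != seed}
%
% # Sybils minted by 'attacker' cannot jointly score more than the capacity of the
% # single seed->attacker edge, which is the intuition behind flow-based sybilproofness.
% edges = [("seed", "honest", 2), ("seed", "attacker", 1),
%          ("attacker", "sybil1", 5), ("attacker", "sybil2", 5)]
% print(flow_reputation(edges, "seed"))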
   9378 @conference {Danezis05sybil-resistantdht,
   9379         title = {Sybil-resistant DHT routing},
   9380         booktitle = {Proceedings of ESORICS 2005},
   9381         year = {2005},
   9382         pages = {305--318},
   9383         publisher = {Springer},
   9384         organization = {Springer},
   9385         abstract = {Distributed Hash Tables (DHTs) are very efficient distributed systems for routing, but at the same time vulnerable to disruptive nodes. Designers of such systems want them used in open networks, where an adversary can perform a sybil attack by introducing a large number of corrupt nodes in the network, considerably degrading its performance. We introduce a routing strategy that alleviates some of the effects of such an attack by making sure that lookups are performed using a diverse set of nodes. This ensures that at least some of the nodes queried are good, and hence the search makes forward progress. This strategy makes use of latent social information present in the introduction graph of the network},
   9386         www_section = {distributed hash table, routing},
   9387         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.3947},
   9388         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sybildht.pdf},
   9389         author = {George Danezis and Chris Lesniewski-laas and Frans M. Kaashoek and Ross Anderson}
   9390 }
   9391 @conference {Nielson05ataxonomy,
   9392         title = {A Taxonomy of Rational Attacks},
   9393         booktitle = {Proceedings of the 4th International Workshop on Peer-to-Peer Systems (IPTPS '05)},
   9394         year = {2005},
   9395         pages = {36--46},
   9396         publisher = {Springer-Verlag},
   9397         organization = {Springer-Verlag},
   9398         abstract = {For peer-to-peer services to be effective, participating nodes must cooperate, but in most scenarios a node represents a self-interested party and cooperation can neither be expected nor enforced. A reasonable assumption is that a large fraction of p2p nodes are rational and will attempt to maximize their consumption of system resources while minimizing the use of their own. If such behavior violates system policy then it constitutes an attack. In this paper we identify and create a taxonomy for rational attacks and then identify corresponding solutions if they exist. The most effective solutions directly incentivize cooperative behavior, but when this is not feasible the common alternative is to incentivize evidence of cooperation instead},
   9399         www_section = {attack, P2P},
   9400         doi = {10.1007/11558989},
   9401         url = {http://www.springerlink.com/content/lh21385ml723844j/},
   9402         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CameraReady_240.pdf},
   9403         author = {Seth James Nielson and Scott A. Crosby}
   9404 }
   9405 @booklet {UCAM-CL-TR-637,
   9406         title = {The Topology of Covert Conflict},
   9407         number = {UCAM-CL-TR-637},
   9408         year = {2005},
   9409         month = {July},
   9410         publisher = {University of Cambridge Computer Laboratory},
   9411         abstract = {This is a short talk on topology of covert conflict, comprising joint work I've been doing with Ross Anderson. The background of this work is the following. We consider a conflict, and there are parties to the conflict. There is communication going on that can be abstracted as a network of nodes (parties) and links (social ties between the nodes). We contend that once you've got a conflict and you've got enough parties to it, these guys start communicating as a result of the conflict. They form connections, that influences the conflict, and the dynamics of the conflict in turn feeds the connectivity of the unfolding network.
   9412 Modern conflicts often turn on connectivity: consider, for instance, anything from the American army's attack on the Taleban in Afghanistan, and elsewhere, or medics who are trying to battle a disease, like Aids, or anything else. All of these turn on making strategic decisions about which nodes to go after in the network. For instance, you could consider that a good first place to give condoms out and start any Aids programme would be with prostitutes},
   9413         doi = {10.1007/978-3-540-77156-2},
   9414         url = {http://www.springerlink.com/content/p885q38262486876/},
   9415         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UCAM-CL-TR-637.pdf},
   9416         author = {Shishir Nagaraja and Ross Anderson}
   9417 }
   9418 @conference {kutzner05autonomic,
   9419         title = {Towards Autonomic Networking using Overlay Routing Techniques},
   9420         booktitle = {Proceedings of the 18th International Conference on Architecture of Computing Systems (ARCS '05)--System Aspects in Organic and Pervasive Computing},
   9421         year = {2005},
   9422         pages = {222--235},
   9423         publisher = {Springer Berlin / Heidelberg},
   9424         organization = {Springer Berlin / Heidelberg},
   9425         type = {publication},
   9426         address = {Innsbruck, Austria},
   9427         abstract = {With an ever-growing number of computers being embedded into our surroundings, the era of ubiquitous computing is approaching fast. However, as the number of networked devices increases, so does system complexity. Contrary to the goal of achieving an invisible computer, the required amount of management and human intervention increases more and more, both slowing down the growth rate and limiting the achievable size of ubiquitous systems.
   9428 In this paper we present a novel routing approach that is capable of handling complex networks without any administrative intervention. Based on a combination of standard overlay routing techniques and source routes, this approach is capable of efficiently bootstrapping a routable network. Unlike other approaches that try to combine peer-to-peer ideas with ad-hoc networks, sensor networks, or ubiquitous systems, our approach is not based on a routing scheme. This makes the resulting system flexible and powerful with respect to application support as well as efficient with regard to routing overhead and system complexity},
   9429         www_section = {autonomous systems, overlay networks, P2P},
   9430         isbn = {978-3-540-25273-3},
   9431         doi = {10.1007/b106632},
   9432         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9433         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05autonomic.pdf},
   9434         author = {Kendy Kutzner and Thomas Fuhrmann}
   9435 }
   9436 @conference {WangCJ05,
   9437         title = {Tracking anonymous peer-to-peer VoIP calls on the internet},
   9438         booktitle = {Proceedings of the ACM Conference on Computer and Communications Security},
   9439         year = {2005},
   9440         month = {November},
   9441         pages = {81--91},
   9442         publisher = {ACM  New York, NY, USA},
   9443         organization = {ACM  New York, NY, USA},
   9444         abstract = {Peer-to-peer VoIP calls are becoming increasingly popular due to their advantages in cost and convenience. When these calls are encrypted from end to end and anonymized by a low latency anonymizing network, they are considered by many people to be both secure and anonymous. In this paper, we present a watermark technique that could be used for effectively identifying and correlating encrypted, peer-to-peer VoIP calls even if they are anonymized by low latency anonymizing networks. This result is in contrast to many people's perception. The key idea is to embed a unique watermark into the encrypted VoIP flow by slightly adjusting the timing of selected packets. Our analysis shows that it only takes several milliseconds of timing adjustment to make normal VoIP flows highly unique and the embedded watermark could be preserved across the low latency anonymizing network if appropriate redundancy is applied. Our analytical results are backed up by real-time experiments performed on a leading peer-to-peer VoIP client and on a commercially deployed anonymizing network. Our results demonstrate that (1) tracking anonymous peer-to-peer VoIP calls on the Internet is feasible and (2) low latency anonymizing networks are susceptible to timing attacks},
   9445         www_section = {anonymity, P2P},
   9446         isbn = {1-59593-226-7},
   9447         doi = {10.1145/1102120.1102133},
   9448         url = {http://portal.acm.org/citation.cfm?id=1102120.1102133},
   9449         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WangCJ05.pdf},
   9450         author = {Xinyuan Wang and Shiping Chen and Sushil Jajodia}
   9451 }
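% Editor's note on WangCJ05: a toy Python sketch of the timing-watermark idea the
% abstract describes (delaying selected packets by a few milliseconds to embed bits
% that survive low-latency anonymizers). The packetization interval, delay, group
% size and decoding threshold are all invented here, not taken from the paper.
%
% import random
%
% INTERVAL_MS = 20.0   # assumed nominal VoIP packet interval
% DELAY_MS = 3.0       # assumed per-packet watermark delay
% GROUP = 8            # packets carrying one watermark bit
%
% def embed(send_times, bits):
%     """Delay every odd-indexed packet of group g by DELAY_MS when bits[g] == 1."""
%     return [t + DELAY_MS if (k // GROUP) < len(bits) and bits[k // GROUP] and k % 2
%             else t for k, t in enumerate(send_times)]
%
% def extract(arrivals, nbits):
%     """Decode from inter-packet gaps only: a delayed packet enlarges the gap before
%     it and shrinks the gap after it, so the two group means differ by ~2*DELAY_MS."""
%     gaps = [b - a for a, b in zip(arrivals, arrivals[1:])]
%     bits = []
%     for g in range(nbits):
%         ks = range(g * GROUP + 1, (g + 1) * GROUP)
%         odd = [gaps[k - 1] for k in ks if k % 2 == 1]
%         even = [gaps[k - 1] for k in ks if k % 2 == 0]
%         bits.append(1 if sum(odd) / len(odd) - sum(even) / len(even) > DELAY_MS / 2 else 0)
%     return bits
%
% bits = [1, 0, 1, 1]
% sent = embed([k * INTERVAL_MS for k in range(len(bits) * GROUP)], bits)
% noisy = [t + random.uniform(0.0, 0.5) for t in sent]   # mild network jitter
% print(extract(noisy, len(bits)))                        # recovers [1, 0, 1, 1] despite jitter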
   9452 @conference {pet05-zhu,
   9453         title = {Unmixing Mix Traffic},
   9454         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
   9455         year = {2005},
   9456         month = {May},
   9457         pages = {110--127},
   9458         publisher = {Springer Berlin / Heidelberg},
   9459         organization = {Springer Berlin / Heidelberg},
   9460         abstract = {We apply blind source separation techniques from statistical signal processing to separate the traffic in a mix network. Our experiments show that this attack is effective and scalable. By combining the flow separation method and frequency spectrum matching method, a passive attacker can get the traffic map of the mix network. We use a non-trivial network to show that the combined attack works. The experiments also show that multicast traffic can be dangerous for anonymity networks},
   9461         www_section = {anonymity},
   9462         isbn = {978-3-540-34745-3},
   9463         doi = {10.1007/11767831},
   9464         url = {http://www.springerlink.com/content/l5110366246k5003/},
   9465         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-zhu.pdf},
   9466         author = {Ye Zhu and Riccardo Bettati}
   9467 }
   9468 @conference {fuhrmann05emnets,
   9469         title = {The Use of Scalable Source Routing for Networked Sensors},
   9470         booktitle = {Proceedings of the 2nd IEEE Workshop on Embedded Networked Sensors},
   9471         year = {2005},
   9472         pages = {163--165},
   9473         publisher = {IEEE Computer Society  Washington, DC, USA},
   9474         organization = {IEEE Computer Society  Washington, DC, USA},
   9475         type = {publication},
   9476         address = {Sydney, Australia},
   9477         abstract = {In this paper, we briefly present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in networks with 'random topology'. This algorithm enables sensor networks to use recent peer-to-peer mechanisms from the field of overlay networks, such as distributed hash tables and indirection infrastructures. Unlike other proposals along that direction, SSR integrates all necessary routing tasks into one simple, highly efficient routing protocol. Simulations demonstrate that in a small-world network with more than 100 000 nodes, SSR requires each node to only store routing data for 255 other nodes to establish routes between arbitrary pairs of nodes. These routes are on average only about 20-30\% longer than the globally optimal path between these nodes},
   9478         www_section = {scalable source routing, topology matching},
   9479         isbn = {0-7803-9246-9},
   9480         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9481         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann05emnets.pdf},
   9482         author = {Thomas Fuhrmann}
   9483 }
   9484 @conference {DBLP:conf/sigcomm/JainDPF05,
   9485         title = {Using redundancy to cope with failures in a delay tolerant network},
   9486         booktitle = {SIGCOMM},
   9487         year = {2005},
   9488         pages = {109--120},
   9489         publisher = {ACM  New York, NY, USA},
   9490         organization = {ACM  New York, NY, USA},
   9491         address = {Philadelphia, Pennsylvania, USA},
   9492         abstract = {We consider the problem of routing in a delay tolerant network (DTN) in the presence of path failures. Previous work on DTN routing has focused on using precisely known network dynamics, which does not account for message losses due to link failures, buffer overruns, path selection errors, unscheduled delays, or other problems. We show how to split, replicate, and erasure code message fragments over multiple delivery paths to optimize the probability of successful message delivery. We provide a formulation of this problem and solve it for two cases: a 0/1 (Bernoulli) path delivery model where messages are either fully lost or delivered, and a Gaussian path delivery model where only a fraction of a message may be delivered. Ideas from the modern portfolio theory literature are borrowed to solve the underlying optimization problem. Our approach is directly relevant to solving similar problems that arise in replica placement in distributed file systems and virtual node placement in DHTs. In three different simulated DTN scenarios covering a wide range of applications, we show the effectiveness of our approach in handling failures},
   9493         www_section = {delay tolerant network, routing},
   9494         isbn = {1-59593-009-4},
   9495         doi = {10.1145/1080091.1080106},
   9496         url = {http://portal.acm.org/citation.cfm?doid=1080091.1080106},
   9497         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-JaiDem.pdf},
   9498         author = {Sushant Jain and Michael J. Demmer and Rabin K. Patra and Fall, Kevin}
   9499 }
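% Editor's note on DBLP:conf/sigcomm/JainDPF05: a small worked example of the 0/1
% (Bernoulli) path model mentioned in the abstract: the probability that enough
% fragments arrive when one fragment is sent over each of several independently
% failing paths. The path probabilities and code parameters are made up; the paper's
% actual contribution is choosing the allocation optimally, which is not shown here.
%
% from itertools import combinations
%
% def delivery_probability(path_success, k):
%     """P(at least k fragments arrive), one fragment per independent Bernoulli path."""
%     n = len(path_success)
%     total = 0.0
%     for r in range(k, n + 1):
%         for alive in combinations(range(n), r):
%             p = 1.0
%             for i in range(n):
%                 p *= path_success[i] if i in alive else 1.0 - path_success[i]
%             total += p
%     return total
%
% paths = [0.8, 0.8, 0.8]
% print(delivery_probability(paths, 1))   # 3 full replicas, any 1 suffices: ~0.992
% print(delivery_probability(paths, 2))   # (3,2) erasure code, any 2 suffice: ~0.896
% # The erasure-coded variant sends smaller fragments (lower overhead) at the cost of
% # a lower delivery probability on these particular paths.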
   9500 @booklet {Cooley_abs:the,
   9501         title = {ABS: The Apportioned Backup System},
   9502         year = {2004},
   9503         abstract = {Many personal computers are operated with no backup strategy for protecting data in the event of loss or failure. At the same time, PCs are likely to contain spare disk space and unused networking resources. We present the Apportioned Backup System (ABS), which provides a reliable collaborative backup resource by leveraging these independent, distributed resources. With ABS, procuring and maintaining specialized backup hardware is unnecessary. ABS makes efficient use of network and storage resources through use of coding techniques, convergent encryption and storage, and efficient versioning and verification processes. The system also painlessly accommodates dynamic expansion of system compute, storage, and network resources, and is tolerant of catastrophic node failures},
   9504         www_section = {apportioned backup system},
   9505         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.6858},
   9506         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.120.6858.pdf},
   9507         author = {Joe Cooley and Chris Taylor and Alen Peacock}
   9508 }
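% Editor's note on Cooley_abs:the: a minimal stdlib-only sketch of the convergent
% encryption mentioned in the abstract: the key is derived from the content itself,
% so identical blocks encrypt identically and can be deduplicated by storage peers.
% The hash-counter keystream below is a toy stand-in for whatever cipher ABS actually
% uses; it only illustrates the key-derivation idea.
%
% import hashlib
%
% def _keystream(key, length):
%     out = b""
%     counter = 0
%     while len(out) < length:
%         out += hashlib.sha256(key + counter.to_bytes(8, "big")).digest()
%         counter += 1
%     return out[:length]
%
% def convergent_encrypt(plaintext):
%     key = hashlib.sha256(plaintext).digest()             # key = H(content)
%     ct = bytes(a ^ b for a, b in zip(plaintext, _keystream(key, len(plaintext))))
%     locator = hashlib.sha256(ct).hexdigest()              # content-derived address
%     return key, locator, ct
%
% def convergent_decrypt(key, ciphertext):
%     return bytes(a ^ b for a, b in zip(ciphertext, _keystream(key, len(ciphertext))))
%
% k1, loc1, c1 = convergent_encrypt(b"same backup block")
% k2, loc2, c2 = convergent_encrypt(b"same backup block")
% assert c1 == c2 and loc1 == loc2                          # dedup-friendly
% assert convergent_decrypt(k1, c1) == b"same backup block"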
   9509 @conference {Baset04ananalysis,
   9510         title = {An Analysis of the Skype Peer-to-Peer Internet Telephony Protocol},
   9511         booktitle = {INFOCOM 2006. Proceedings of the 25th Annual Joint Conference of the IEEE Computer and Communications Societies},
   9512         year = {2004},
   9513         month = apr,
   9514         address = {Barcelona, Catalunya, Spain},
   9515         abstract = {Skype is a peer-to-peer VoIP client developed by KaZaa in 2003. Skype claims that it can work almost seamlessly across NATs and firewalls and has better voice quality than the MSN and Yahoo IM applications. It encrypts calls end-to-end, and stores user information in a decentralized fashion. Skype also supports instant messaging and conferencing. This report analyzes key Skype functions such as login, NAT and firewall traversal, call establishment, media transfer, codecs, and conferencing under three different network setups. Analysis is performed by careful study of Skype network traffic},
   9516         www_section = {P2P, VoIP},
   9517         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.84.2433},
   9518         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cucs-039-04.pdf},
   9519         author = {Salman A. Baset and Henning G. Schulzrinne}
   9520 }
   9521 @conference {newman:pet2004,
   9522         title = {Anonymity and Covert Channels in Simple Timed Mix-firewalls},
   9523         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
   9524         series = {LNCS},
   9525         volume = {3424},
   9526         year = {2004},
   9527         month = {May},
   9528         pages = {1--16},
   9529         publisher = {Springer Berlin / Heidelberg},
   9530         organization = {Springer Berlin / Heidelberg},
   9531         abstract = {Traditional methods for evaluating the amount of anonymity afforded by various Mix configurations have depended on either measuring the size of the set of possible senders of a particular message (the anonymity set size), or measuring the entropy associated with the probability distribution of the message's possible senders. This paper explores further an alternative way of assessing the anonymity of a Mix system by considering the capacity of a covert channel from a sender behind the Mix to an observer of the Mix's output.
   9532 Initial work considered a simple model, with an observer (Eve) restricted to counting the number of messages leaving a Mix configured as a firewall guarding an enclave with one malicious sender (Alice) and some other naive senders (Clueless$_i$'s). Here, we consider the case where Eve can distinguish between multiple destinations, and the senders can select to which destination their message (if any) is sent each clock tick},
   9533         isbn = {978-3-540-26203-9},
   9534         doi = {10.1007/b136164},
   9535         url = {http://www.springerlink.com/content/w256n3dfl6wf2q3m/},
   9536         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/newman-pet2004.pdf},
   9537         author = {Richard E. Newman and Vipan R. Nalla and Ira S. Moskowitz}
   9538 }
   9539 @article {halpern-oneill-2003,
   9540         title = {Anonymity and Information Hiding in Multiagent Systems},
   9541         journal = {Journal of Computer Security},
   9542         volume = {13},
   9543         year = {2004},
   9544         pages = {483--514},
   9545         abstract = {We provide a framework for reasoning about information-hiding requirements in multiagent systems and for reasoning about anonymity in particular. Our framework employs the modal logic of knowledge within the context of the runs and systems framework, much in the spirit of our earlier work on secrecy [13]. We give several definitions of anonymity with respect to agents, actions and observers in multiagent systems, and we relate our definitions of anonymity to other definitions of information hiding, such as secrecy. We also give probabilistic definitions of anonymity that are able to quantify an observer's uncertainty about the state of the system. Finally, we relate our definitions of anonymity to other formalizations of anonymity and information hiding, including definitions of anonymity in the process algebra CSP and definitions of information hiding using function views},
   9546         www_section = {anonymity, epistemic logic, formal methods},
   9547         issn = {0926-227X},
   9548         url = {http://portal.acm.org/citation.cfm?id=1145953},
   9549         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/halpern-oneill-2003.pdf},
   9550         author = {Joseph Y. Halpern and Kevin R. O'Neill}
   9551 }
   9552 @mastersthesis {andrei-thesis,
   9553         title = {On the Anonymity of Anonymity Systems},
   9554         year = {2004},
   9555         month = {June},
   9556         school = {University of Cambridge},
   9557         type = {phd},
   9558         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/andrei-thesis.pdf},
   9559         author = {Andrei Serjantov}
   9560 }
   9561 @conference {wisa04-Klonowski,
   9562         title = {Anonymous Communication with On-line and Off-line Onion Encoding},
   9563         booktitle = {Proceedings of Workshop on Information Security Applications (WISA 2004)},
   9564         year = {2004},
   9565         month = {August},
   9566         publisher = {Springer Berlin / Heidelberg},
   9567         organization = {Springer Berlin / Heidelberg},
   9568         abstract = {Encapsulating messages in onions is one of the major techniques providing anonymous communication in computer networks. To some extent, it provides security against traffic analysis by a passive adversary. However, it can be highly vulnerable to attacks by an active adversary. For instance, the adversary may perform a simple so-called repetitive attack: a malicious server sends the same message twice, then the adversary traces places where the same message appears twice -- revealing the route of the original message. A repetitive attack was examined for mix-networks. However, none of the countermeasures designed is suitable for onion-routing.
   9569 In this paper we propose an {\textquotedblleft}onion-like{\textquotedblright} encoding design based on universal re-encryption. The onions constructed in this way can be used in a protocol that achieves the same goals as the classical onions; however, at the same time we achieve immunity against a repetitive attack. Even if an adversary disturbs communication and prevents processing a message somewhere on the onion path, it is easy to identify the malicious server performing the attack and provide evidence of its illegal behavior},
   9570         www_section = {onion routing, repetitive attack, universal re-encryption, unlinkability},
   9571         isbn = {978-3-540-24302-1},
   9572         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wisa04-Klonowski.pdf},
   9573         author = {Marcin Gomulkiewicz and Marek Klonowski and Miroslaw Kutylowski}
   9574 }
   9575 @conference {Mislove04ap3:cooperative,
   9576         title = {AP3: Cooperative, decentralized anonymous communication},
   9577         booktitle = {Proceedings of the ACM SIGOPS European Workshop},
   9578         author = {Mislove, Alan and Oberoi, Gaurav and Post, Ansley and Reis, Charles and Druschel, Peter and Wallach, Dan S.},
   9579         year = {2004},
   9580         abstract = {This paper describes a cooperative overlay network that provides anonymous communication services for participating users. The Anonymizing Peer-to-Peer Proxy (AP3) system provides clients with three primitives: (i) anonymous message delivery, (ii) anonymous channels, and (iii) secure pseudonyms. AP3 is designed to be lightweight, low-cost and provides "probable innocence" anonymity to participating users, even under a large-scale coordinated attack by a limited fraction of malicious overlay nodes. Additionally, we use AP3's primitives to build novel anonymous group communication facilities (multicast and anycast), which shield the identity of both publishers and subscribers},
   9581         www_section = {anonymity, Peer-to-Peer Proxy},
   9582         doi = {10.1145/1133572.1133578},
   9583         url = {http://portal.acm.org/citation.cfm?id=1133578},
   9584         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.6219.pdf}
   9585 }
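% Editor's note on Mislove04ap3:cooperative: the "probable innocence" mentioned in the
% abstract comes from Crowds-style probabilistic forwarding. The sketch below simulates
% only that coin-flip relay decision (the forwarding probability is an arbitrary
% assumption), not AP3's actual overlay, peer selection or message format.
%
% import random
%
% FORWARD_P = 0.75   # assumed forwarding probability; higher => longer paths, better deniability
%
% def relay_hops():
%     """Each relay forwards to a random peer with probability FORWARD_P, otherwise it
%     delivers. A node observed sending the message cannot be distinguished from the
%     true originator, which is the probable-innocence argument."""
%     hops = 0
%     while random.random() < FORWARD_P:
%         hops += 1          # in a real overlay: send to a randomly chosen peer
%     return hops
%
% samples = [relay_hops() for _ in range(10000)]
% print(sum(samples) / len(samples))   # ~ FORWARD_P / (1 - FORWARD_P) = 3 extra hops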
   9586 @booklet {2004_0,
   9587         title = {Apres -- a system for anonymous presence},
   9588         year = {2004},
   9589         abstract = {If Alice wants to know when Bob is online, and they don't want anyone else to know their interest in each other, what do they do? Once they know they are both online, they would like to be able to exchange messages, send files, make phone calls to each other, and so forth, all without anyone except them knowing they are doing this. Apres is a system that attempts to make this possible},
   9590         www_section = {anonymous presence, presence},
   9591         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/apres.pdf},
   9592         author = {Laurie, Ben}
   9593 }
   9594 @conference {Antoniadis04anasymptotically,
   9595         title = {An Asymptotically Optimal Scheme for P2P File Sharing},
   9596         booktitle = {2nd Workshop on the Economics of Peer-to-Peer Systems},
   9597         year = {2004},
   9598         month = jan,
   9599         address = {Harvard University},
   9600         abstract = {The asymptotic analysis of certain public good models for p2p systems suggests that when the aim is to maximize social welfare a fixed contribution scheme in terms of the number of files shared can be asymptotically optimal as the number of participants grows to infinity. Such a simple scheme eliminates free riding, is incentive compatible and obtains a value of social welfare that is within o(n) of that obtained by the second-best policy of the corresponding mechanism design formulation of the problem. We extend our model to account for file popularity, and discuss properties of the resulting equilibria. The fact that a simple optimization problem can be used to closely approximate the solution of the exact model (which is in most cases practically intractable both analytically and computationally), is of great importance for studying several interesting aspects of the system. We consider the evolution of the system to equilibrium in its early life, when both peers and the system planner are still learning about system parameters. We also analyse the case of group formation when peers belong to different classes (such as DSL and dial-up users), and it may be to their advantage to form distinct groups instead of a larger single group, or form such a larger group but avoid disclosing their class. We finally discuss the game that occurs when peers know that a fixed fee will be used, but the distribution of their valuations is unknown to the system designer},
   9601         www_section = {asymptotically optimal, P2P, sharing},
   9602         url = {http://www.eecs.harvard.edu/p2pecon/confman/papers},
   9603         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/optimalscheme04.pdf},
   9604         author = {Panayotis Antoniadis and Costas Courcoubetis and Richard Weber}
   9605 }
   9606 @booklet {Levien04attackresistant,
   9607         title = {Attack Resistant Trust Metrics},
   9608         year = {2004},
   9609         abstract = {This dissertation characterizes the space of trust metrics, under both the scalar assumption where each assertion is evaluated independently, and the group assumption where a group of assertions are evaluated in tandem. We present a quantitative framework for evaluating the attack resistance of trust metrics, and give examples of trust metrics that are within a small factor of optimum compared to theoretical upper bounds. We discuss experiences with a real-world deployment of a group trust metric, the Advogato website. Finally, we explore possible applications of attack resistant trust metrics, including using them to build a distributed name server, verifying metadata in peer-to-peer networks such as music sharing systems, and a proposal for highly spam resistant e-mail delivery},
   9610         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.83.9266},
   9611         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/compact.pdf},
   9612         author = {Raph Levien}
   9613 }
   9614 @conference {2004.Pang.imc.dns,
   9615         title = {Availability, Usage, and Deployment Characteristics of the Domain Name System},
   9616         booktitle = {IMC'04--Proceedings of the 4th ACM SIGCOMM Conference on Internet Measurement},
   9617         year = {2004},
   9618         month = oct,
   9619         publisher = {ACM},
   9620         organization = {ACM},
   9621         address = {Taormina, Sicily, Italy},
   9622         abstract = {The Domain Name System (DNS) is a critical part of the Internet's infrastructure, and is one of the few examples of a robust, highly-scalable, and operational distributed system. Although a few studies have been devoted to characterizing its properties, such as its workload and the stability of the top-level servers, many key components of DNS have not yet been examined. Based on large-scale measurements taken from servers in a large content distribution network, we present a detailed study of key characteristics of the DNS infrastructure, such as load distribution, availability, and deployment patterns of DNS servers. Our analysis includes both local DNS servers and servers in the authoritative hierarchy. We find that (1) the vast majority of users use a small fraction of deployed name servers, (2) the availability of most name servers is high, and (3) there exists a larger degree of diversity in local DNS server deployment and usage than for authoritative servers. Furthermore, we use our DNS measurements to draw conclusions about federated infrastructures in general. We evaluate and discuss the impact of federated deployment models on future systems, such as Distributed Hash Tables},
   9623         www_section = {availability, DNS, federated},
   9624         isbn = {1-58113-821-0},
   9625         doi = {10.1145/1028788.1028790},
   9626         url = {http://doi.acm.org/10.1145/1028788.1028790},
   9627         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2704\%20-\%20Availability\%2C\%20Usage\%2C\%20and\%20Deployment\%20Characteristics\%20of\%20the\%20DNS.pdf},
   9628         author = {Jeffrey Pang and James Hendricks and Aditya Akella and Bruce Maggs and Roberto De Prisco and Seshan, Srinivasan}
   9629 }
   9630 @article {1026492,
   9631         title = {Basic Concepts and Taxonomy of Dependable and Secure Computing},
   9632         journal = {IEEE Transactions on Dependable and Secure Computing},
   9633         volume = {1},
   9634         number = {1},
   9635         year = {2004},
   9636         pages = {11--33},
   9637         publisher = {IEEE Computer Society Press},
   9638         address = {Los Alamitos, CA, USA},
   9639         abstract = {This paper gives the main definitions relating to dependability, a generic concept including as special case such attributes as reliability, availability, safety, integrity, maintainability, etc. Security brings in concerns for confidentiality, in addition to availability and integrity. Basic definitions are given first. They are then commented upon, and supplemented by additional definitions, which address the threats to dependability and security (faults, errors, failures), their attributes, and the means for their achievement (fault prevention, fault tolerance, fault removal, fault forecasting). The aim is to explicate a set of general concepts, of relevance across a wide range of situations and, therefore, helping communication and cooperation among a number of scientific and technical communities, including ones that are concentrating on particular types of system, of system failures, or of causes of system failures},
   9640         www_section = {attack, fault removal, fault-tolerance, index terms-dependability, trust, vulnerability},
   9641         issn = {1545-5971},
   9642         doi = {10.1109/TDSC.2004.2},
   9643         url = {http://portal.acm.org/citation.cfm?id=1026488.1026492},
   9644         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2793.pdf},
   9645         author = {Avizienis, Algirdas and Laprie, Jean-Claude and Randell, Brian and Carl Landwehr}
   9646 }
   9647 @mastersthesis {george-thesis,
   9648         title = {Better Anonymous Communications},
   9649         year = {2004},
   9650         month = {July},
   9651         school = {University of Cambridge},
   9652         type = {phd},
   9653         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.3200},
   9654         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/george-thesis.pdf},
   9655         author = {George Danezis}
   9656 }
   9657 @conference {Cramer04Bootstrapping,
   9658         title = {Bootstrapping Locality-Aware P2P Networks},
   9659         booktitle = {Proceedings of the IEEE International Conference on Networks (ICON 2004)},
   9660         address = {Singapore},
   9661         volume = {1},
   9662         year = {2004},
   9663         pages = {357--361},
   9664         type = {publication},
   9665         abstract = {Bootstrapping is a vital core functionality required by every peer-to-peer (P2P) overlay network. Nodes intending to participate in such an overlay network initially have to find at least one node that is already part of this network. While structured P2P networks (e.g. distributed hash tables, DHTs) define rules about how to proceed after this point, unstructured P2P networks continue using bootstrapping techniques until they are sufficiently connected. In this paper, we compare solutions applicable to the bootstrapping problem. Measurements of an existing system, the Gnutella web caches, highlight the inefficiency of this particular approach. Improved bootstrapping mechanisms could also incorporate locality-awareness into the process. We propose an advanced mechanism by which the overlay topology is--to some extent--matched with the underlying topology. Thereby, the performance of the overall system can be vastly improved},
   9666         www_section = {bootstrapping, distributed hash table, P2P},
   9667         isbn = {0-7803-8783-X},
   9668         doi = {10.1109/ICON.2004.1409169},
   9669         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9670         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04bootstrapping.pdf},
   9671         author = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann}
   9672 }
   9673 @conference {mixmaster-reliable,
   9674         title = {Comparison between two practical mix designs},
   9675         booktitle = {Proceedings of ESORICS 2004},
   9676         series = {LNCS},
   9677         year = {2004},
   9678         month = {September},
   9679         address = {France},
   9680         abstract = {We evaluate the anonymity provided by two popular email mix implementations, Mixmaster and Reliable, and compare their effectiveness through the use of simulations which model the algorithms used by these mixing applications. Our simulations are based on actual traffic data obtained from a public anonymous remailer (mix node). We determine that assumptions made in previous literature about the distribution of mix input traffic are incorrect: in particular, the input traffic does not follow a Poisson distribution. We establish for the first time that a lower bound exists on the anonymity of Mixmaster, and discover that under certain circumstances the algorithm used by Reliable provides no anonymity. We find that the upper bound on anonymity provided by Mixmaster is slightly higher than that provided by Reliable.
   9681 We identify flaws in the software in Reliable that further compromise its ability to provide anonymity, and review key areas that are necessary for the security of a mix in addition to a sound algorithm. Our analysis can be used to evaluate under which circumstances the two mixing algorithms should be used to best achieve anonymity and satisfy their purpose. Our work can also be used as a framework for establishing a security review process for mix node deployments},
   9682         isbn = {978-3-540-22987-2},
   9683         doi = {10.1007/b100085},
   9684         url = {http://www.springerlink.com/content/7lvqwn445ty1c7ga/},
   9685         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mixmaster-reliable.pdf},
   9686         author = {Claudia Diaz and Len Sassaman and Evelyne Dewitte}
   9687 }
   9688 @article {2004_1,
   9689         title = {A construction of locality-aware overlay network: mOverlay and its performance},
   9690         journal = {IEEE Journal on Selected Areas in Communications},
   9691         volume = {22},
   9692         year = {2004},
   9693         month = jan,
   9694         pages = {18--28},
   9695         abstract = {There are many research interests in peer-to-peer (P2P) overlay architectures. Most widely used unstructured P2P networks rely on central directory servers or massive message flooding, clearly not scalable. Structured overlay networks based on distributed hash tables (DHT) are expected to eliminate flooding and central servers, but can require many long-haul message deliveries. An important aspect of constructing an efficient overlay network is how to exploit network locality in the underlying network. We propose a novel mechanism, mOverlay, for constructing an overlay network that takes account of the locality of network hosts. The constructed overlay network can significantly decrease the communication cost between end hosts by ensuring that a message reaches its destination with small overhead and very efficient forwarding. To construct the locality-aware overlay network, dynamic landmark technology is introduced. We present an effective locating algorithm for a new host joining the overlay network. We then present a theoretical analysis and simulation results to evaluate the network performance. Our analysis shows that the overhead of our locating algorithm is O(logN), where N is the number of overlay network hosts. Our simulation results show that the average distance between a pair of hosts in the constructed overlay network is only about 11\% of the one in a traditional, randomly connected overlay network. Network design guidelines are also provided. Many large-scale network applications, such as media streaming, application-level multicasting, and media distribution, can leverage mOverlay to enhance their performance},
   9696         www_section = {distributed hash table, flooding attacks, overlay networks, P2P},
   9697         url = {http://kmweb.twbbs.org/drupal/node/13},
   9698         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/6-914.ppt},
   9699         author = {Xin Yan Zhang and Qian Zhang and Zhang, Zhensheng and Gang Song and Wenwu Zhu}
   9700 }
   9701 @conference {1111777,
   9702         title = {Data durability in peer to peer storage systems},
   9703         booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on Cluster Computing and the Grid},
   9704         year = {2004},
   9705         pages = {90--97},
   9706         publisher = {IEEE Computer Society},
   9707         organization = {IEEE Computer Society},
   9708         address = {Washington, DC, USA},
   9709         abstract = {In this paper we present a quantitative study of data survival in peer to peer storage systems. We first recall two main redundancy mechanisms: replication and erasure codes, which are used by most peer to peer storage systems like OceanStore, PAST or CFS, to guarantee data durability. Second we characterize peer to peer systems according to a volatility factor (a peer is free to leave the system at anytime) and to an availability factor (a peer is not permanently connected to the system). Third we model the behavior of a system as a Markov chain and analyse the average life time of data (MTTF) according to the volatility and availability factors. We also present the cost of the repair process based on these redundancy schemes to recover failed peers. The conclusion of this study is that when there is no high availability of peers, a simple replication scheme may be more efficient than sophisticated erasure codes},
   9710         www_section = {P2P, redundancy, storage},
   9711         isbn = {0-7803-8430-X},
   9712         url = {http://portal.acm.org/citation.cfm?id=1111777},
   9713         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.9992.pdf},
   9714         author = {Gil Utard and Antoine Vernois}
   9715 }
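% Editor's note on the entry above (1111777): a worked comparison, in the spirit of the
% abstract's availability factor, of plain replication (any 1 of r copies readable)
% versus an (n, k) erasure code (any k of n fragments readable) when each peer is
% online independently with probability a. The numbers are illustrative only and do
% not reproduce the paper's Markov-chain MTTF analysis.
%
% from math import comb
%
% def replication_availability(a, r):
%     return 1 - (1 - a) ** r
%
% def erasure_availability(a, n, k):
%     return sum(comb(n, i) * a ** i * (1 - a) ** (n - i) for i in range(k, n + 1))
%
% # Same 3x storage overhead: 3 full replicas vs. a (12, 4) erasure code.
% for a in (0.9, 0.5, 0.2):
%     print(a, replication_availability(a, 3), erasure_availability(a, 12, 4))
% # At a = 0.2 replication (~0.49) beats the erasure code (~0.21), matching the
% # abstract's conclusion that simple replication can win when peer availability is low.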
   9716 @conference {Garces-Erice2004DataIndexing,
   9717         title = {Data Indexing in Peer-to-Peer DHT Networks},
   9718         booktitle = {Proceedings of the 24th International Conference on Distributed Computing Systems (ICDCS'04)},
   9719         series = {ICDCS '04},
   9720         year = {2004},
   9721         pages = {200--208},
   9722         publisher = {IEEE Computer Society},
   9723         organization = {IEEE Computer Society},
   9724         address = {Washington, DC, USA},
   9725         isbn = {0-7695-2086-3},
   9726         url = {http://dl.acm.org/citation.cfm?id=977400.977979},
   9727         author = {L Garc{\'e}s-Erice and Felber, P. A. and E W Biersack and Urvoy-Keller, G. and Ross, K. W.}
   9728 }
   9729 @mastersthesis {2004_2,
   9730         title = {The Decentralised Coordination of Self-Adaptive Components for Autonomic Distributed Systems},
   9731         type = {Doctor of Philosophy},
   9732         year = {2004},
   9733         month = oct,
   9734         pages = {0--214},
   9735         school = {University of Dublin},
   9736         address = {Dublin, Ireland},
   9737         www_section = {autonomic distributed system, descentralised coordination},
   9738         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20Autonomic\%20distributed\%20systems.pdf},
   9739         author = {Jim Dowling}
   9740 }
   9741 @conference {1133613,
   9742         title = {Defending against eclipse attacks on overlay networks},
   9743         booktitle = {EW 11: Proceedings of the 11th workshop on ACM SIGOPS European workshop},
   9744         year = {2004},
   9745         pages = {0--21},
   9746         publisher = {ACM},
   9747         organization = {ACM},
   9748         address = {New York, NY, USA},
   9749         abstract = {Overlay networks are widely used to deploy functionality at edge nodes without changing network routers. Each node in an overlay network maintains pointers to a set of neighbor nodes. These pointers are used both to maintain the overlay and to implement application functionality, for example, to locate content stored by overlay nodes. If an attacker controls a large fraction of the neighbors of correct nodes, it can "eclipse" correct nodes and prevent correct overlay operation. This Eclipse attack is more general than the Sybil attack. Attackers can use a Sybil attack to launch an Eclipse attack by inventing a large number of seemingly distinct overlay nodes. However, defenses against Sybil attacks do not prevent Eclipse attacks because attackers may manipulate the overlay maintenance algorithm to mount an Eclipse attack. This paper discusses the impact of the Eclipse attack on several types of overlay and it proposes a novel defense that prevents the attack by bounding the degree of overlay nodes. Our defense can be applied to any overlay and it enables secure implementations of overlay optimizations that choose neighbors according to metrics like proximity. We present preliminary results that demonstrate the importance of defending against the Eclipse attack and show that our defense is effective},
   9750         www_section = {attack, overlay networks},
   9751         doi = {10.1145/1133572.1133613},
   9752         url = {http://portal.acm.org/citation.cfm?id=1133572.1133613},
   9753         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.5727.pdf},
   9754         author = {Singh, Atul and Miguel Castro and Peter Druschel and Antony Rowstron}
   9755 }
   9756 @conference {Cramer04DemandDrivenClustering,
   9757         title = {Demand-Driven Clustering in MANETs},
   9758         booktitle = {Proceedings of the 2004 International Conference on Wireless Networks (ICWN '04)},
   9759         volume = {1},
   9760         year = {2004},
   9761         pages = {81--87},
   9762         type = {publication},
   9763         address = {Las Vegas, NV},
   9764         abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been proposed in the literature. With only one exception so far [1], all these protocols are proactive, thus wasting bandwidth when their function is not currently needed. To reduce the signalling traffic load, reactive clustering may be employed. We have developed a clustering protocol named {\textquotedblleft}On-Demand Group Mobility-Based Clustering{\textquotedblright} (ODGMBC) which is reactive. Its goal is to build clusters as a basis for address autoconfiguration and hierarchical routing. The design process especially addresses the notion of group mobility in a MANET. As a result, ODGMBC maps varying physical node groups onto logical clusters. In this paper, ODGMBC is described. It was implemented for the ad hoc network simulator GloMoSim [2] and evaluated using several performance indicators. Simulation results are promising and show that ODGMBC leads to stable clusters. This stability is advantageous for autoconfiguration and routing mechanisms to be employed in conjunction with the clustering algorithm. Index Terms {\textemdash} clustering, multi-hop, reactive, MANET, group mobility},
   9765         www_section = {mobile Ad-hoc networks, multi-hop networks},
   9766         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9767         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04odgmbc.pdf},
   9768         author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart}
   9769 }
   9770 @conference {Hof04SecureDistributedServiceDirectory,
   9771         title = {Design of a Secure Distributed Service Directory for Wireless Sensornetworks},
   9772         booktitle = {Proceedings of the First European Workshop on Wireless Sensor Networks},
   9773         year = {2004},
   9774         type = {publication},
   9775         address = {Berlin, Germany},
   9776         abstract = {Sensor networks consist of a potentially huge number of very small and resource limited self-organizing devices. This paper presents the design of a general distributed service directory architecture for sensor networks which especially focuses on the security issues in sensor networks. It ensures secure construction and maintenance of the underlying storage structure, a Content Addressable Network. It also considers integrity of the distributed service directory and secures communication between service provider and inquirer using self-certifying path names. Key areas of application of this architecture are gradually extendable sensor networks where sensors and actuators jointly perform various user defined tasks, e.g., in the field of an office environment},
   9777         www_section = {sensor networks},
   9778         isbn = {978-3-540-20825-9},
   9779         doi = {10.1007/978-3-540-24606-0_19},
   9780         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9781         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scan.pdf},
   9782         author = {Hans-Joachim Hof and Erik-Oliver Blass and Thomas Fuhrmann and Martina Zitterbart}
   9783 }
   9784 @conference {Dabek:2004:DDL:1251175.1251182,
   9785         title = {Designing a DHT for Low Latency and High Throughput},
   9786         booktitle = {NSDI'04--Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation },
   9787         year = {2004},
   9788         month = mar,
   9789         pages = {7--7},
   9790         publisher = {USENIX Association},
   9791         organization = {USENIX Association},
   9792         address = {San Francisco, CA, USA},
   9793         abstract = {Designing a wide-area distributed hash table (DHT) that provides high-throughput and low-latency network storage is a challenge. Existing systems have explored a range of solutions, including iterative routing, recursive routing, proximity routing and neighbor selection, erasure coding, replication, and server selection.
   9794 
   9795 This paper explores the design of these techniques and their interaction in a complete system, drawing on the measured performance of a new DHT implementation and results from a simulator with an accurate Internet latency model. New techniques that resulted from this exploration include use of latency predictions based on synthetic co-ordinates, efficient integration of lookup routing and data fetching, and a congestion control mechanism suitable for fetching data striped over large numbers of servers.
   9796 
   9797 Measurements with 425 server instances running on 150 PlanetLab and RON hosts show that the latency optimizations reduce the time required to locate and fetch data by a factor of two. The throughput optimizations result in a sustainable bulk read throughput related to the number of DHT hosts times the capacity of the slowest access link; with 150 selected PlanetLab hosts, the peak aggregate throughput over multiple clients is 12.8 megabytes per second},
   9798         www_section = {distributed hash table, high-throughput, latency},
   9799         url = {http://dl.acm.org/citation.cfm?id=1251175.1251182},
   9800         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2704\%20-\%20Designing\%20a\%20DHT\%20for\%20low\%20latency\%20and\%20high\%20throughput.pdf},
   9801         author = {Dabek, Frank and Li, Jinyang and Emil Sit and Robertson, James and Frans M. Kaashoek and Robert Morris}
   9802 }
   9803 @conference {2004_3,
   9804         title = {Designing Incentive mechanisms for peer-to-peer systems},
   9805         booktitle = {GECON 2004. 1st IEEE International Workshop on Grid Economics and Business Models},
   9806         year = {2004},
   9807         month = apr,
   9808         pages = {67--81},
   9809         publisher = {IEEE Computer Society},
   9810         organization = {IEEE Computer Society},
   9811         address = {Seoul, South Korea},
   9812         abstract = {From file-sharing to mobile ad-hoc networks, community networking to application layer overlays, the peer-to-peer networking paradigm promises to revolutionize the way we design, build and use the communications network of tomorrow, transform the structure of the communications industry, and challenge our understanding of markets and democracies in a digital age. The fundamental premise of peer-to-peer systems is that individual peers voluntarily contribute resources to the system. We discuss some of the research opportunities and challenges in the design of incentive mechanisms for P2P systems},
   9813         www_section = {incentives, P2P, peer-to-peer networking},
   9814         isbn = {0-7803-8525-X},
   9815         doi = {10.1109/GECON.2004.1317584},
   9816         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GECON\%2704\%20-\%20Designing\%20incentive\%20mechanisms\%20for\%20p2p\%20systems.pdf},
   9817         author = {John Chuang}
   9818 }
   9819 @booklet {_digitalfountains:,
   9820   author = {TODO},
   9821         title = {Digital Fountains: A Survey and Look Forward},
   9822         year = {2004},
   9823         abstract = {We survey constructions and applications of digital fountains, an abstraction of erasure coding for network communication. Digital fountains effectively change the standard paradigm where a user receives an ordered stream of packets to one where a user must simply receive enough packets in order to obtain the desired data. Obviating the need for ordered data simplifies data delivery, especially when the data is large or is to be distributed to a large number of users. We also examine barriers to the adoption of digital fountains and discuss whether they can be overcome},
   9824         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.114.2282},
   9825         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.114.2282.pdf}
   9826 }
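% Editor's note on _digitalfountains: a minimal fountain-style sketch of the abstract's
% point that a receiver only needs "enough" packets, in any order. Encoded symbols are
% XORs of random subsets of the source blocks and are decoded by Gaussian elimination
% over GF(2); real LT/Raptor codes use tuned degree distributions and much faster
% peeling decoders, neither of which is shown here.
%
% import random
%
% def encode_symbol(blocks):
%     """One rateless symbol: XOR of a uniformly random non-empty subset of blocks."""
%     mask = random.randrange(1, 1 << len(blocks))
%     val = 0
%     for i in range(len(blocks)):
%         if mask >> i & 1:
%             val ^= blocks[i]
%     return mask, val
%
% def decode(symbols, k):
%     """Recover the k source blocks once the received masks span GF(2)^k."""
%     pivots = {}                                   # pivot column -> (mask, value)
%     for mask, val in symbols:
%         while mask:
%             col = (mask & -mask).bit_length() - 1
%             if col not in pivots:
%                 pivots[col] = (mask, val)
%                 break
%             pm, pv = pivots[col]
%             mask, val = mask ^ pm, val ^ pv
%     if len(pivots) < k:
%         return None                               # keep listening for more symbols
%     blocks = [0] * k
%     for col in sorted(pivots, reverse=True):      # back-substitution
%         mask, val = pivots[col]
%         for other in range(col + 1, k):
%             if mask >> other & 1:
%                 val ^= blocks[other]
%         blocks[col] = val
%     return blocks
%
% source = [0x41, 0x6e, 0x6f, 0x6e]                 # four one-byte source blocks
% received, blocks = [], None
% while blocks is None:                             # losses and reordering would not matter
%     received.append(encode_symbol(source))
%     blocks = decode(received, len(source))
% print(blocks == source, "after", len(received), "symbols")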
   9827 @conference {golle:eurocrypt2004,
   9828         title = {Dining Cryptographers Revisited},
   9829         booktitle = {Proceedings of Eurocrypt 2004},
   9830         year = {2004},
   9831         month = {May},
   9832         publisher = {Springer Berlin / Heidelberg},
   9833         organization = {Springer Berlin / Heidelberg},
   9834         abstract = {Dining cryptographers networks (or DC-nets) are a privacy-preserving primitive devised by Chaum for anonymous message publication. A very attractive feature of the basic DC-net is its non-interactivity. Subsequent to key establishment, players may publish their messages in a single broadcast round, with no player-to-player communication. This feature is not possible in other privacy-preserving tools like mixnets. A drawback to DC-nets, however, is that malicious players can easily jam them, i.e., corrupt or block the transmission of messages from honest parties, and may do so without being traced.
   9835 Several researchers have proposed valuable methods of detecting cheating players in DC-nets. This is usually at the cost, however, of multiple broadcast rounds, even in the optimistic case, and often of high computational and/or communications overhead, particularly for fault recovery.
   9836 We present new DC-net constructions that simultaneously achieve non-interactivity and high-probability detection and identification of cheating players. Our proposals are quite efficient, imposing a basic cost that is linear in the number of participating players. Moreover, even in the case of cheating in our proposed system, just one additional broadcast round suffices for full fault recovery. Among other tools, our constructions employ bilinear maps, a recently popular cryptographic technique for reducing communication complexity},
   9837         www_section = {anonymity, dining cryptographers, non-interactive, privacy},
   9838         isbn = {978-3-540-21935-4},
   9839         doi = {10.1007/b97182},
   9840         url = {http://www.springerlink.com/content/ud2tb1fyk5m2ywlu/},
   9841         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-eurocrypt2004.pdf},
   9842         author = {Philippe Golle and Ari Juels}
   9843 }
   9844 @conference {izal:dissecting,
   9845         title = {Dissecting BitTorrent: Five Months in a Torrent's Lifetime},
   9846         booktitle = {PAM '04. Proceedings of Passive and Active Measurements},
   9847         series = {Lecture Notes in Computer Science},
   9848         volume = {3015},
   9849         year = {2004},
   9850         month = apr,
   9851         pages = {1--11},
   9852         publisher = {Springer},
   9853         organization = {Springer},
   9854         address = {Antibes Juan-les-Pins, France},
   9855         abstract = {Popular content such as software updates is requested by a large number of users. Traditionally, to satisfy a large number of requests, larger server farms or mirroring are used, both of which are expensive. An inexpensive alternative is peer-to-peer-based replication systems, where users who retrieve the file act simultaneously as clients and servers. In this paper, we study BitTorrent, a new and already very popular peer-to-peer application that allows distribution of very large contents to a large set of hosts. Our analysis of BitTorrent is based on measurements collected over a five-month period that involved thousands of peers},
   9856         www_section = {BitTorrent, P2P, peer-to-peer networking, replication system},
   9857         doi = {10.1007/978-3-540-24668-8_1},
   9858         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PAM\%2704\%20-\%20Dissecting\%20bittorrent.pdf},
   9859         author = {Mikel Izal and Guillaume Urvoy-Keller and E W Biersack and Pascal Felber and Anwar Al Hamra and L Garc{\'e}s-Erice}
   9860 }
   9861 @conference {Cramer04Scheduling,
   9862         title = {Distributed Job Scheduling in a Peer-to-Peer Video Recording System},
   9863         booktitle = {Proceedings of the Workshop on Algorithms and Protocols for Efficient Peer-to-Peer Applications (PEPPA) at Informatik 2004},
   9864         year = {2004},
   9865         pages = {234--238},
   9866         type = {publication},
   9867         address = {Ulm, Germany},
   9868         abstract = {Since the advent of Gnutella, Peer-to-Peer (P2P) protocols have matured towards a fundamental design element for large-scale, self-organising distributed systems. Many research efforts have been invested to improve various aspects of P2P systems, like their performance, scalability, and so on. However, little experience has been gathered from the actual deployment of such P2P systems apart from the typical file sharing applications. To bridge this gap and to gain more experience in making the transition from theory to practice, we started building advanced P2P applications whose explicit goal is {\textquotedblleft}to be deployed in the wild{\textquotedblright}. In this paper, we describe a fully decentralised P2P video recording system. Every node in the system is a networked computer (desktop PC or set-top box) capable of receiving and recording DVB-S, i.e. digital satellite TV. Like a normal video recorder, users can program their machines to record certain programmes. With our system, they will be able to schedule multiple recordings in parallel. It is the task of the system to assign the recordings to different machines in the network. Moreover, users can {\textquotedblleft}record broadcasts in the past{\textquotedblright}, i.e. the system serves as a short-term archival storage},
   9869         www_section = {DVB, P2P},
   9870         url = {http://i30www.ira.uka.de/research/publications/p2p/},
   9871         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04scheduling.pdf},
   9872         author = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann}
   9873 }
   9874 @conference {mmsec04-Klonowski,
   9875         title = {DUO--Onions and Hydra--Onions -- Failure and Adversary Resistant Onion Protocols},
   9876         booktitle = {Proceedings of the IFIP TC-6 TC-11 Conference on Communications and Multimedia Security 2004},
   9877         year = {2004},
   9878         month = {September},
   9879         publisher = {Springer Boston},
   9880         organization = {Springer Boston},
   9881         abstract = {A serious weakness of the onion protocol, one of the major tools for anonymous communication, is its vulnerability to network failures and/or an adversary trying to break the communication. This is facilitated by the fact that each message is sent through a path of a certain length and a failure in a single point of this path prohibits message delivery. Since the path cannot be too short in order to offer anonymity protection (at least logarithmic in the number of nodes), the failure probability might be quite substantial.
   9882 The simplest solution to this problem would be to send many onions with the same message. We show that this approach can be optimized with respect to communication overhead and resilience to failures and/or adversary attacks. We propose two protocols: the first one mimics K independent onions with a single onion. The second protocol is designed for the case where an adaptive adversary may destroy communication going out of servers chosen according to the traffic observed by him. In this case a single message flows in a stream of K onions {\textemdash} the main point is that even when the adversary kills some of these onions, the stream quickly recovers to the original bandwidth {\textemdash} again K onions with this message would flow through the network},
   9883         www_section = {adaptive adversary, anonymity, onion routing},
   9884         isbn = {978-0-387-24485-3},
   9885         doi = {10.1007/b105674},
   9886         url = {http://www.springerlink.com/content/019lu6xp5b9fctn8/},
   9887         author = {Jan Iwanik and Marek Klonowski and Miroslaw Kutylowski}
   9888 }
   9889 @conference {Danezis04theeconomics,
   9890         title = {The Economics of Censorship Resistance},
   9891         booktitle = {The Third Annual Workshop on Economics and Information Security (WEIS04)},
   9892         year = {2004},
   9893         abstract = {We propose the first economic model of censorship resistance. Early peer-to-peer systems, such as the Eternity Service, sought to achieve censorship resistance by distributing content randomly over the whole Internet. An alternative approach is to encourage nodes to serve resources they are interested in. Both architectures have been implemented but so far there has been no quantitative analysis of the protection they provide. We develop a model inspired by economics and conflict theory to analyse these systems. Under our assumptions, resource distribution according to nodes' individual preferences provides better stability and resistance to censorship. Our results may have wider application too},
   9895         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.7003\&rep=rep1\&type=pdf},
   9896         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.7003\%20\%281\%29.pdf},
   9897         author = {George Danezis and Ross Anderson}
   9898 }
   9899 @book {2004_4,
   9900         title = {Efficient Private Matching and Set Intersection},
   9901         booktitle = {Advances in Cryptology--EUROCRYPT 2004},
   9902         series = {Lecture Notes in Computer Science},
   9903         volume = {3027},
   9904         year = {2004},
   9905         pages = {1--19},
   9906         publisher = {Springer Berlin Heidelberg},
   9907         organization = {Springer Berlin Heidelberg},
   9908         abstract = {We consider the problem of computing the intersection of private datasets of two parties, where the datasets contain lists of elements taken from a large domain. This problem has many applications for online collaboration. We present protocols, based on the use of homomorphic encryption and balanced hashing, for both semi-honest and malicious environments. For lists of length k, we obtain O(k) communication overhead and O(k ln ln k) computation. The protocol for the semi-honest environment is secure in the standard model, while the protocol for the malicious environment is secure in the random oracle model. We also consider the problem of approximating the size of the intersection, show a linear lower-bound for the communication overhead of solving this problem, and provide a suitable secure protocol. Lastly, we investigate other variants of the matching problem, including extending the protocol to the multi-party setting as well as considering the problem of approximate matching},
   9909         isbn = {978-3-540-21935-4},
   9910         doi = {10.1007/978-3-540-24676-3_1},
   9911         url = {http://dx.doi.org/10.1007/978-3-540-24676-3_1},
   9912         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EffecitvePrivateMatching2004Freedman.pdf},
   9913         author = {Freedman, Michael J. and Nissim, Kobbi and Pinkas, Benny},
   9914         editor = {Cachin, Christian and Camenisch, Jan L.}
   9915 }
   9916 @conference {Helmy04efficientresource,
   9917         title = {Efficient Resource Discovery in Wireless AdHoc Networks: Contacts Do Help},
   9918         booktitle = {Chapter in: Resource Management in Wireless Networking},
   9919         year = {2004},
   9920         publisher = {Kluwer Academic Publishers},
   9921         organization = {Kluwer Academic Publishers},
   9922         abstract = {The resource discovery problem poses new challenges in infrastructure-less wireless networks. Due to the highly dynamic nature of these networks and their bandwidth and energy constraints, there is a pressing need for energy-aware, communication-efficient resource discovery protocols. This chapter provides an overview of several approaches to resource discovery, discussing their suitability for classes of wireless networks. The approaches discussed in this chapter include flooding-based approaches, hierarchical cluster-based and dominating set schemes, and hybrid loose hierarchy architectures. Furthermore, the chapter provides a detailed case study on the design, evaluation and analysis of an energy-efficient resource discovery protocol based on hybrid loose hierarchy and utilizing the concept of {\textquoteleft}contacts{\textquoteright}},
   9923         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.9310},
   9924         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.76.9310.pdf},
   9925         author = {Ahmed Helmy}
   9926 }
   9927 @conference {mrkoot:sirer04,
   9928         title = {Eluding carnivores: file sharing with strong anonymity},
   9929         booktitle = {Proceedings of the 11th Workshop on ACM SIGOPS European Workshop: Beyond the PC (EW11)},
   9930         year = {2004},
   9931         publisher = {ACM Press},
   9932         organization = {ACM Press},
   9933         address = {New York, NY, USA},
   9934         www_section = {anonymity, file-sharing, overlay networks},
   9935         doi = {10.1145/1133572.1133611},
   9936         url = {http://dx.doi.org/10.1145/1133572.1133611},
   9937         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herbivore-esigops.pdf},
   9938         author = {Emin G{\"u}n Sirer and Goel, Sharad and Mark Robson and Engin, Dogan}
   9939 }
   9940 @conference {1013317,
   9941         title = {Energy-aware demand paging on NAND flash-based embedded storages},
   9942         booktitle = {ISLPED '04: Proceedings of the 2004 international symposium on Low power electronics and design},
   9943         year = {2004},
   9944         pages = {338--343},
   9945         publisher = {ACM},
   9946         organization = {ACM},
   9947         address = {New York, NY, USA},
   9948         abstract = {The ever-increasing requirement for high-performance and huge-capacity memories of emerging embedded applications has led to the widespread adoption of SDRAM and NAND flash memory as main and secondary memories, respectively. In particular, the use of energy-consuming memory, SDRAM, has become burdensome in battery-powered embedded systems. Intuitively, though demand paging can be used to mitigate the increasing requirement of main memory size, its applicability should be deliberately elaborated since NAND flash memory has asymmetric operation characteristics in terms of performance and energy consumption. In this paper, we present an energy-aware demand paging technique to lower the energy consumption of embedded systems considering the characteristics of interactive embedded applications with large memory footprints. We also propose a flash memory-aware page replacement policy that can reduce the number of write and erase operations in NAND flash memory. With real-life workloads, we show the system-wide Energy{\textperiodcentered}Delay can be reduced by 15--30\% compared to the traditional shadowing architecture},
   9949         isbn = {1-58113-929-2},
   9950         doi = {10.1145/1013235.1013317},
   9951         url = {http://doi.acm.org/10.1145/1013235.1013317},
   9952         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2004-ISLPED-Energy-aware\%20demand\%20paging\%20on\%20NAND\%20flash-based\%20embedded\%20storages.pdf},
   9953         author = {Chanik Park and Kang, Jeong-Uk and Park, Seon-Yeong and Kim, Jin-Soo}
   9954 }
   9955 @conference {1251279,
   9956         title = {Energy-efficiency and storage flexibility in the blue file system},
   9957         booktitle = {OSDI'04: Proceedings of the 6th Symposium on Operating Systems Design \& Implementation},
   9958         year = {2004},
   9959         pages = {25--25},
   9960         publisher = {USENIX Association},
   9961         organization = {USENIX Association},
   9962         address = {Berkeley, CA, USA},
   9963         abstract = {A fundamental vision driving pervasive computing research is access to personal and shared data anywhere at anytime. In many ways, this vision is close to being realized. Wireless networks such as 802.11 offer connectivity to small, mobile devices. Portable storage, such as mobile disks and USB keychains, let users carry several gigabytes of data in their pockets. Yet, at least three substantial barriers to pervasive data access remain. First, power-hungry network and storage devices tax the limited battery capacity of mobile computers. Second, the danger of viewing stale data or making inconsistent updates grows as objects are replicated across more computers and portable storage devices. Third, mobile data access performance can suffer due to variable storage access times caused by dynamic power management, mobility, and use of heterogeneous storage devices. To overcome these barriers, we have built a new distributed file system called BlueFS. Compared to the Coda file system, BlueFS reduces file system energy usage by up to 55\% and provides up to 3 times faster access to data replicated on portable storage},
   9964         www_section = {802.11, file systems},
   9965         url = {http://portal.acm.org/citation.cfm?id=1251279$\#$},
   9966         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nightingale-bluefs2004.pdf},
   9967         author = {Nightingale, Edmund B. and Flinn, Jason}
   9968 }
   9969 @article {2004_5,
   9970         title = {Enhancing Web privacy and anonymity in the digital era},
   9971         journal = {Information Management \& Computer Security},
   9972         volume = {12},
   9973         year = {2004},
   9974         month = jan,
   9975         pages = {255--287},
   9976         type = {survey},
   9977         abstract = {This paper presents a state-of-the-art review of the Web privacy and anonymity enhancing security mechanisms, tools, applications and services, with respect to their architecture, operational principles and vulnerabilities. Furthermore, to facilitate a detailed comparative analysis, the appropriate parameters have been selected and grouped in classes of comparison criteria, in the form of an integrated comparison framework. The main concern during the design of this framework was to cover the confronted security threats, applied technological issues and users' demands satisfaction. GNUnet's Anonymity Protocol (GAP), Freedom, Hordes, Crowds, Onion Routing, Platform for Privacy Preferences (P3P), TRUSTe, Lucent Personalized Web Assistant (LPWA), and Anonymizer have been reviewed and compared. The comparative review has clearly highlighted that the pros and cons of each system do not coincide, mainly due to the fact that each one exhibits different design goals and thus adopts dissimilar techniques for protecting privacy and anonymity},
   9978         www_section = {anonymity, GNUnet, onion routing},
   9979         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p255.pdf},
   9980         author = {Stefanos Gritzalis}
   9981 }
   9982 @conference {1021938,
   9983         title = {Erasure Code Replication Revisited},
   9984         booktitle = {P2P '04: Proceedings of the Fourth International Conference on Peer-to-Peer Computing},
   9985         year = {2004},
   9986         pages = {90--97},
   9987         publisher = {IEEE Computer Society},
   9988         organization = {IEEE Computer Society},
   9989         address = {Washington, DC, USA},
   9990         abstract = {Erasure coding is a technique for achieving high availability and reliability in storage and communication systems. In this paper, we revisit the analysis of erasure code replication and point out some situations when whole-file replication is preferred. The switchover point (from preferring whole-file replication to erasure code replication) is studied, and characterized using asymptotic analysis. We also discuss the additional considerations in building erasure code replication systems},
   9991         isbn = {0-7695-2156-8},
   9992         doi = {10.1109/P2P.2004.17},
   9993         url = {http://portal.acm.org/citation.cfm?id=1021938$\#$},
   9994         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.109.2034.pdf},
   9995         author = {Lin, W. K. and Chiu, Dah Ming and Lee, Y. B.}
   9996 }
   9997 @booklet {You04evaluationof,
   9998         title = {Evaluation of Efficient Archival Storage Techniques},
   9999         year = {2004},
  10000         abstract = {The ever-increasing volume of archival data that need to be retained for long periods of time has motivated the design of low-cost, high-efficiency storage systems. Inter-file compression has been proposed as a technique to improve storage efficiency by exploiting the high degree of similarity among archival data. We evaluate the two main inter-file compression techniques, data chunking and delta encoding, and compare them with traditional intra-file compression. We report on experimental results from a range of representative archival data sets},
  10001         www_section = {compression, storage},
  10002         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.1341},
  10003         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.11.1341.pdf},
  10004         author = {Lawrence L. You and Christos Karamanolis}
  10005 }
  10006 @conference {Karp2004/ALGO,
  10007         title = {Finite length analysis of LT codes},
  10008         booktitle = {Proceedings of the IEEE International Symposium on Information Theory, ISIT 2004},
  10009         year = {2004},
  10010         month = jan,
  10011         pages = {0--39},
  10012         abstract = {This paper provides an efficient method for analyzing the error probability of the belief propagation (BP) decoder applied to LT Codes. Each output symbol is generated independently by sampling from a distribution and adding the input symbols corresponding to the support of the sampled vector},
  10013         www_section = {algoweb_ldpc},
  10014         isbn = {0-7695-1822-2},
  10015         doi = {10.1109/ISIT.2004.1365074},
  10016         url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1181950},
  10017         author = {Richard Karp and Luby, Michael and M. Amin Shokrollahi}
  10018 }
  10019 @conference {flow-correlation04,
  10020         title = {On Flow Correlation Attacks and Countermeasures in Mix Networks},
  10021         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10022         series = {LNCS},
  10023         volume = {3424},
  10024         year = {2004},
  10025         month = {May},
  10026         pages = {207--225},
  10027         publisher = {Springer Berlin / Heidelberg},
  10028         organization = {Springer Berlin / Heidelberg},
  10029         abstract = {In this paper, we address issues related to flow correlation attacks and the corresponding countermeasures in mix networks. Mixes have been used in many anonymous communication systems and are supposed to provide countermeasures that can defeat various traffic analysis attacks. In this paper, we focus on a particular class of traffic analysis attack, flow correlation attacks, by which an adversary attempts to analyze the network traffic and correlate the traffic of a flow over an input link at a mix with that over an output link of the same mix. Two classes of correlation methods are considered, namely time-domain methods and frequency-domain methods. Based on our threat model and known strategies in existing mix networks, we perform extensive experiments to analyze the performance of mixes. We find that a mix with any known batching strategy may fail against flow correlation attacks in the sense that for a given flow over an input link, the adversary can correctly determine which output link is used by the same flow. We also investigated methods that can effectively counter the flow correlation attack and other timing attacks. The empirical results provided in this paper give an indication to designers of Mix networks about appropriate configurations and alternative mechanisms to be used to counter flow correlation attacks.
  10030 This work was supported in part by the National Science Foundation under Contracts 0081761 and 0324988, by the Defense Advanced Research Projects Agency under Contract F30602-99-1-0531, and by Texas A\&M University under its Telecommunication and Information Task Force Program. Any opinions, findings, and conclusions or recommendations in this material, either expressed or implied, are those of the authors and do not necessarily reflect the views of the sponsors listed above},
  10031         www_section = {flow correlation attack},
  10032         isbn = {978-3-540-26203-9},
  10033         doi = {10.1007/b136164},
  10034         url = {http://www.springerlink.com/content/kej7uwxee8h71p81/},
  10035         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/flow-correlation04.pdf},
  10036         author = {Ye Zhu and Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao}
  10037 }
  10038 @conference {esorics04-mauw,
  10039         title = {A formalization of anonymity and onion routing},
  10040         booktitle = {Proceedings of ESORICS 2004},
  10041         year = {2004},
  10042         pages = {109--124},
  10043         publisher = {LNCS 3193},
  10044         organization = {LNCS 3193},
  10045         address = {Sophia Antipolis},
  10046         abstract = {The use of formal methods to verify security protocols with respect to secrecy and authentication has become standard practice. In contrast, the formalization of other security goals, such as privacy, has received less attention. Due to the increasing importance of privacy in the current society, formal methods will also become indispensable in this area. Therefore, we propose a formal definition of the notion of anonymity in presence of an observing intruder. We validate this definition by analyzing a well-known anonymity preserving protocol, viz. onion routing},
  10047         www_section = {anonymity, onion routing, privacy},
  10048         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.75.2547},
  10049         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/esorics04-mauw.pdf},
  10050         author = {Sjouke Mauw and Jan Verschuren and Erik P. de Vink}
  10051 }
  10052 @conference {reiter:ccs2004,
  10053         title = {Fragile Mixing},
  10054         booktitle = {Proceedings of the 11th ACM Conference on Computer and Communications Security (CCS 2004)},
  10055         year = {2004},
  10056         month = {October},
  10057         publisher = {ACM Press},
  10058         organization = {ACM Press},
  10059         address = {Washington DC, USA},
  10060         abstract = {No matter how well designed and engineered, a mix server offers little protection if its administrator can be convinced to log and selectively disclose correspondences between its input and output messages, either for profit or to cooperate with an investigation. In this paper we propose a technique, fragile mixing, to discourage an administrator from revealing such correspondences, assuming he is motivated to protect the unlinkability of other communications that flow through the mix (e.g., his own). Briefly, fragile mixing implements the property that any disclosure of an input-message-to-output-message correspondence discloses all such correspondences for that batch of output messages. We detail this technique in the context of a re-encryption mix, its integration with a mix network, and incentive and efficiency issues},
  10061         www_section = {anonymity, mix, privacy, unlinkability},
  10062         isbn = {1-58113-961-6},
  10063         doi = {10.1145/1030083.1030114},
  10064         url = {http://portal.acm.org/citation.cfm?id=1030114},
  10065         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reiter-ccs2004.pdf},
  10066         author = {Michael K. Reiter and XiaoFeng Wang}
  10067 }
  10068 @conference {Feldman:2004:FWP:1016527.1016539,
  10069         title = {Free-riding and whitewashing in peer-to-peer systems},
  10070         booktitle = {PINS'04. Proceedings of the ACM SIGCOMM Workshop on Practice and Theory of Incentives in Networked Systems},
  10071         series = {PINS '04},
  10072         year = {2004},
  10073         month = aug,
  10074         pages = {228--236},
  10075         publisher = {ACM},
  10076         organization = {ACM},
  10077         address = {Portland, OR},
  10078         abstract = {We develop a model to study the phenomenon of free-riding in peer-to-peer (P2P) systems. At the heart of our model is a user of a certain type, an intrinsic and private parameter that reflects the user's willingness to contribute resources to the system. A user decides whether to contribute or free-ride based on how the current contribution cost in the system compares to her type. When the societal generosity (i.e., the average type) is low, intervention is required in order to sustain the system. We present the effect of mechanisms that exclude low-type users or, more realistically, penalize free-riders with degraded service. We also consider dynamic scenarios with arrivals and departures of users, and with whitewashers: users who leave the system and rejoin with new identities to avoid reputational penalties. We find that when a penalty is imposed on all newcomers in order to avoid whitewashing, system performance degrades significantly only when the turnover rate among users is high},
  10079         www_section = {cheap pseudonyms, cooperation, equilibrium, exclusion, free-riding, identity cost, incentives, peer-to-peer networking, whitewashing},
  10080         isbn = {1-58113-942-X},
  10081         doi = {10.1145/1016527.1016539},
  10082         url = {http://doi.acm.org/10.1145/1016527.1016539},
  10083         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PINS\%2704\%20-\%20\%20Free-riding\%20and\%20whitewashing\%20in\%20P2P\%20systems.pdf},
  10084         author = {Michal Feldman and Papadimitriou, Christos and John Chuang and Ion Stoica}
  10085 }
  10086 @book {2004_6,
  10087         title = {Group Spreading: A Protocol for Provably Secure Distributed Name Service},
  10088         booktitle = {Automata, Languages and Programming},
  10089         series = {Lecture Notes in Computer Science},
  10090         volume = {3142},
  10091         year = {2004},
  10092         pages = {183--195},
  10093         publisher = {Springer Berlin Heidelberg},
  10094         organization = {Springer Berlin Heidelberg},
  10095         isbn = {978-3-540-22849-3},
  10096         doi = {10.1007/978-3-540-27836-8_18},
  10097         url = {http://dx.doi.org/10.1007/978-3-540-27836-8_18},
  10098         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p_icalp04_0.pdf},
  10099         author = {Awerbuch, Baruch and Scheideler, Christian},
  10100         editor = {D{\'\i}az, Josep and Karhum{\"a}ki, Juhani and Lepist{\"o}, Arto and Sannella, Donald}
  10101 }
  10102 @conference {hitting-set04,
  10103         title = {The Hitting Set Attack on Anonymity Protocols},
  10104         booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)},
  10105         series = {LNCS},
  10106         year = {2004},
  10107         month = {May},
  10108         address = {Toronto},
  10109         abstract = {A passive attacker can compromise a generic anonymity protocol by applying the so-called disclosure attack, i.e. a special traffic analysis attack. In this work we present a more efficient way to accomplish this goal, i.e. we need fewer observations by looking for unique minimal hitting sets. We call this the hitting set attack or just HS-attack.
  10110 In general, solving the minimal hitting set problem is NP-hard. Therefore, we use frequency analysis to enhance the applicability of our attack. It is possible to apply highly efficient backtracking search algorithms. We call this approach the statistical hitting set attack or SHS-attack.
  10111 However, the statistical hitting set attack is prone to wrong solutions with a given small probability. We use here duality checking algorithms to resolve this problem. We call this final exact attack the HS*-attack},
  10112         www_section = {anonymity, hitting set attack, traffic analysis},
  10113         doi = {10.1007/b104759},
  10114         url = {http://www.springerlink.com/content/t6bkk4tyjvr71m55/},
  10115         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hitting-set04.pdf},
  10116         author = {Dogan Kesdogan and Lexi Pimenidis}
  10117 }
  10118 @conference {koepsell:wpes2004,
  10119         title = {How to Achieve Blocking Resistance for Existing Systems Enabling Anonymous Web Surfing},
  10120         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2004)},
  10121         year = {2004},
  10122         month = {October},
  10123         publisher = {ACM  New York, NY, USA},
  10124         organization = {ACM  New York, NY, USA},
  10125         address = {Washington, DC, USA},
  10126         abstract = {We are developing a blocking-resistant, practical and usable system for anonymous web surfing. This means the system tries to provide as much reachability and availability as possible, even to users in countries where the free flow of information is legally, organizationally and physically restricted. The proposed solution is an add-on to existing anonymity systems. First, we give a classification of blocking criteria and some general countermeasures. Using these techniques, we outline a concrete design, which is based on the JAP-Web Mixes (aka AN.ON)},
  10127         www_section = {blocking resistance, JAP, mix},
  10128         isbn = {1-58113-968-3},
  10129         doi = {10.1145/1029179.1029197},
  10130         url = {http://portal.acm.org/citation.cfm?id=1029179.1029197},
  10131         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koepsell-wpes2004_0.pdf},
  10132         author = {Stefan K{\"o}psell and Ulf Hillig}
  10133 }
  10134 @conference {fairbrother:pet2004,
  10135         title = {An Improved Construction for Universal Re-encryption},
  10136         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10137         series = {LNCS},
  10138         volume = {3424},
  10139         year = {2004},
  10140         month = {May},
  10141         pages = {79--87},
  10142         publisher = {Springer Berlin / Heidelberg},
  10143         organization = {Springer Berlin / Heidelberg},
  10144         abstract = {Golle et al. recently introduced universal re-encryption, defining it as re-encryption by a player who does not know the key used for the original encryption, but which still allows an intended player to recover the plaintext. Universal re-encryption is potentially useful as part of many information-hiding techniques, as it allows any player to make ciphertext unidentifiable without knowing the key used.
  10145 Golle et al.'s techniques for universal re-encryption are reviewed, and a hybrid universal re-encryption construction with improved work and space requirements, which also permits indefinite re-encryptions, is presented. Some implementational issues and optimisations are discussed},
  10146         www_section = {information hiding, re-encryption},
  10147         isbn = {978-3-540-26203-9},
  10148         doi = {10.1007/b136164},
  10149         url = {http://www.springerlink.com/content/q07439n27u1egx0w/},
  10150         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fairbrother-pet2004.pdf},
  10151         author = {Peter Fairbrother}
  10152 }
  10153 @article {modular-approach,
  10154         title = {Information Hiding, Anonymity and Privacy: A Modular Approach},
  10155         journal = {Journal of Computer Security},
  10156         volume = {12},
  10157         number = {1},
  10158         year = {2004},
  10159         pages = {3--36},
  10160         abstract = {We propose a new specification framework for information hiding properties such as anonymity and privacy. The framework is based on the concept of a function view, which is a concise representation of the attacker's partial knowledge about a function. We describe system behavior as a set of functions, and formalize different information hiding properties in terms of views of these functions. We present an extensive case study, in which we use the function view framework to systematically classify and rigorously define a rich domain of identity-related properties, and to demonstrate that privacy and anonymity are independent.
  10161 
  10162 The key feature of our approach is its modularity. It yields precise, formal specifications of information hiding properties for any protocol formalism and any choice of the attacker model as long as the latter induce an observational equivalence relation on protocol instances. In particular, specifications based on function views are suitable for any cryptographic process calculus that defines some form of indistinguishability between processes. Our definitions of information hiding properties take into account any feature of the security model, including probabilities, random number generation, timing, etc., to the extent that it is accounted for by the formalism in which the system is specified},
  10163         www_section = {anonymity, information hiding, privacy},
  10164         issn = {0926-227X},
  10165         url = {http://portal.acm.org/citation.cfm?id=1297694},
  10166         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shmat_anon.pdf},
  10167         author = {Dominic Hughes and Vitaly Shmatikov}
  10168 }
  10169 @conference {1096703,
  10170         title = {Integrating Portable and Distributed Storage},
  10171         booktitle = {FAST '04: Proceedings of the 3rd USENIX Conference on File and Storage Technologies},
  10172         year = {2004},
  10173         pages = {227--238},
  10174         publisher = {USENIX Association},
  10175         organization = {USENIX Association},
  10176         address = {Berkeley, CA, USA},
  10177         abstract = {We describe a technique called lookaside caching that combines the strengths of distributed file systems and portable storage devices, while negating their weaknesses. In spite of its simplicity, this technique proves to be powerful and versatile. By unifying distributed storage and portable storage into a single abstraction, lookaside caching allows users to treat devices they carry as merely performance and availability assists for distant file servers. Careless use of portable storage has no catastrophic consequences. Experimental results show that significant performance improvements are possible even in the presence of stale data on the portable device},
  10178         www_section = {caching proxies, distributed database},
  10179         url = {http://portal.acm.org/citation.cfm?id=1096703$\#$},
  10180         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/integratingpds-fast04.pdf},
  10181         author = {Niraj Tolia and Harkes, Jan and Michael Kozuch and Satyanarayanan, Mahadev}
  10182 }
  10183 @article {987233,
  10184         title = {Internet indirection infrastructure},
  10185         journal = {IEEE/ACM Trans. Netw},
  10186         volume = {12},
  10187         number = {2},
  10188         year = {2004},
  10189         pages = {205--218},
  10190         publisher = {IEEE Press},
  10191         address = {Piscataway, NJ, USA},
  10192         abstract = {Attempts to generalize the Internet's point-to-point communication abstraction to provide services like multicast, anycast, and mobility have faced challenging technical problems and deployment barriers. To ease the deployment of such services, this paper proposes a general, overlay-based Internet Indirection Infrastructure (i3) that offers a rendezvous-based communication abstraction. Instead of explicitly sending a packet to a destination, each packet is associated with an identifier; this identifier is then used by the receiver to obtain delivery of the packet. This level of indirection decouples the act of sending from the act of receiving, and allows i3 to efficiently support a wide variety of fundamental communication services. To demonstrate the feasibility of this approach, we have designed and built a prototype based on the Chord lookup protocol},
  10193         www_section = {indirection, mobility, multicast, network infrastructure, service composition},
  10194         issn = {1063-6692},
  10195         doi = {10.1109/TNET.2004.826279},
  10196         url = {http://portal.acm.org/citation.cfm?id=987233$\#$},
  10197         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/i3.pdf},
  10198         author = {Ion Stoica and Adkins, Daniel and Shelley Zhuang and S Shenker and Surana, Sonesh}
  10199 }
  10200 @book {2004_7,
  10201         title = {An Introduction to Auction Theory},
  10202         year = {2004},
  10203         pages = {0--199},
  10204         publisher = {Oxford University Press},
  10205         organization = {Oxford University Press},
  10206         edition = {1st},
  10207         abstract = {This book presents an in-depth discussion of auction theory. It introduces the concept of Bayesian Nash equilibrium and the idea of studying auctions as games. Private, common, and affiliated values models and multi-object auction models are described. A general version of the Revenue Equivalence Theorem is derived and the optimal auction is characterized to relate the field of mechanism design to auction theory},
  10208         www_section = {affiliated values model, auction theory, Bayesian Nash equilibrium, common values model, multiple objects, private values model, Revenue Equivalence Theorem},
  10209         isbn = {9780199275984},
  10210         doi = {10.1093/019927598X.001.0001},
  10211         url = {http://www.oxfordscholarship.com/view/10.1093/019927598X.001.0001/acprof-9780199275984},
  10212         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Menezes\%20\%26\%20Monteiro\%20-\%20An\%20Introduction\%20to\%20Auction\%20Theory.pdf},
  10213         author = {Flavio M. Menezes and Paulo K. Monteiro}
  10214 }
  10215 @mastersthesis {Amnefelt04keso-,
  10216         title = {Keso--a Scalable, Reliable and Secure Read/Write Peer-to-Peer File System},
  10217         year = {2004},
  10218         month = may,
  10219         pages = {0--77},
  10220         school = {KTH/Royal Institute of Technology},
  10221         type = {Master's Thesis},
  10222         address = {Stockholm},
  10223         abstract = {In this thesis we present the design of Keso, a distributed and completely decentralized file system based on the peer-to-peer overlay network DKS. While designing Keso we have taken into account many of the problems that exist in today's distributed file systems.
  10224 Traditionally, distributed file systems have been built around dedicated file servers which often use expensive hardware to minimize the risk of breakdown and to handle the load. System administrators are required to monitor the load and disk usage of the file servers and to manually add clients and servers to the system.
  10225 
  10226 Another drawback of centralized file systems is that a lot of storage space is unused on clients. Measurements we have taken on existing computer systems have shown that a large part of the storage capacity of workstations is unused. In the system we looked at, there was three times as much storage space available on workstations as was stored in the distributed file system. We have also shown that much data stored in a production-use distributed file system is redundant.
  10227 
  10228 The main goals for the design of Keso have been that it should make use of spare resources, avoid storing unnecessarily redundant data, scale well, be self-organizing and be a secure file system suitable for a real-world environment.
  10229 
  10230 By basing Keso on peer-to-peer techniques it becomes highly scalable, fault tolerant and self-organizing. Keso is intended to run on ordinary workstations and can make use of the previously unused storage space. Keso also provides means for access control and data privacy despite being built on top of untrusted components. The file system utilizes the fact that a lot of data stored in traditional file systems is redundant by letting all files that contain a datablock with the same contents reference the same datablock in the file system. This is achieved while still maintaining access control and data privacy},
  10231         www_section = {decentralized file system, DKS, Keso},
  10232         url = {http://mattias.amnefe.lt/keso/},
  10233         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amnefelt\%20\%26\%20Svenningsson\%20-\%20Keso.pdf},
  10234         author = {Mattias Amnefelt and Johanna Svenningsson}
  10235 }
  10236 @booklet {Yu04leopard:a,
  10237         title = {Leopard: A locality-aware peer-to-peer system with no hot spot},
  10238         year = {2004},
  10239         publisher = {In: the 4th IFIP Networking Conference (Networking'05)},
  10240         abstract = {A fundamental challenge in Peer-To-Peer (P2P) systems is how to locate objects of interest, namely, the look-up service problem. A key break-through towards a scalable and distributed solution of this problem is the distributed hash},
  10241         www_section = {distributed hash table, P2P},
  10242         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.134.3912},
  10243         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM05_Poster.pdf},
  10244         author = {Yinzhe Yu and Sanghwan Lee and Zhi-li Zhang}
  10245 }
  10246 @conference {feamster:wpes2004,
  10247         title = {Location Diversity in Anonymity Networks},
  10248         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2004)},
  10249         year = {2004},
  10250         month = {October},
  10251         address = {Washington, DC, USA},
  10252         abstract = {Anonymity networks have long relied on diversity of node location for protection against attacks---typically an adversary who can observe a larger fraction of the network can launch a more effective attack. We investigate the diversity of two deployed anonymity networks, Mixmaster and Tor, with respect to an adversary who controls a single Internet administrative domain.
  10253 
  10254 Specifically, we implement a variant of a recently proposed technique that passively estimates the set of administrative domains (also known as autonomous systems, or ASes) between two arbitrary end-hosts without having access to either end of the path. Using this technique, we analyze the AS-level paths that are likely to be used in these anonymity networks. We find several cases in each network where multiple nodes are in the same administrative domain. Further, many paths between nodes, and between nodes and popular endpoints, traverse the same domain},
  10255         www_section = {anonymity, autonomous systems},
  10256         doi = {10.1145/1029179.1029199},
  10257         url = {http://portal.acm.org/citation.cfm?id=1029199},
  10258         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.6119.pdf},
  10259         author = {Nick Feamster and Roger Dingledine}
  10260 }
  10261 @conference {1251195,
  10262         title = {MACEDON: methodology for automatically creating, evaluating, and designing overlay networks},
  10263         booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation},
  10264         year = {2004},
  10265         pages = {20--20},
  10266         publisher = {USENIX Association},
  10267         organization = {USENIX Association},
  10268         address = {Berkeley, CA, USA},
  10269         abstract = {Currently, researchers designing and implementing large-scale overlay services employ disparate techniques at each stage in the production cycle: design, implementation, experimentation, and evaluation. As a result, complex and tedious tasks are often duplicated leading to ineffective resource use and difficulty in fairly comparing competing algorithms. In this paper, we present MACEDON, an infrastructure that provides facilities to: i) specify distributed algorithms in a concise domain-specific language; ii) generate code that executes in popular evaluation infrastructures and in live networks; iii) leverage an overlay-generic API to simplify the interoperability of algorithm implementations and applications; and iv) enable consistent experimental evaluation. We have used MACEDON to implement and evaluate a number of algorithms, including AMMO, Bullet, Chord, NICE, Overcast, Pastry, Scribe, and SplitStream, typically with only a few hundred lines of MACEDON code. Using our infrastructure, we are able to accurately reproduce or exceed published results and behavior demonstrated by current publicly available implementations},
  10270         url = {http://portal.acm.org/citation.cfm?id=1251175.1251195$\#$},
  10271         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.2.8796.pdf},
  10272         author = {Rodriguez, Adolfo and Killian, Charles and Bhat, Sooraj and Kosti{\'c}, Dejan and Vahdat, Amin}
  10273 }
  10274 @conference {TH04,
  10275         title = {Measuring Anonymity in a Non-adaptive, Real-time System},
  10276         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10277         series = {Springer-Verlag, LNCS},
  10278         volume = {3424},
  10279         year = {2004},
  10280         pages = {226--241},
  10281         abstract = {Anonymous message transmission should be a key feature in network architectures ensuring that delivered messages are impossible, or at least infeasible, to be traced back to their senders. For this purpose the formal model of the non-adaptive, real-time PROB-channel will be introduced. In this model attackers try to circumvent applied protection measures and to link senders to delivered messages. In order to formally measure the level of anonymity provided by the system, the probability will be given with which observers can determine the senders of delivered messages (source-hiding property) or the recipients of sent messages (destination-hiding property). In order to reduce the certainty of an observer, possible counter-measures will be defined that will ensure a specified upper limit for the probability with which an observer can mark someone as the sender or recipient of a message. Finally, results of simulations will be shown to demonstrate the strength of the techniques},
  10282         isbn = {3-540-26203-2},
  10283         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.77.851},
  10284         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TH04.pdf},
  10285         author = {Gergely T{\'o}th and Zolt{\'a}n Horn{\'a}k}
  10286 }
  10287 @conference {THV04,
  10288         title = {Measuring Anonymity Revisited},
  10289         booktitle = {Proceedings of the Ninth Nordic Workshop on Secure IT Systems},
  10290         year = {2004},
  10291         month = {November},
  10292         pages = {85--90},
  10293         address = {Espoo, Finland},
  10294         abstract = {Anonymous message transmission systems are the building blocks of several high-level anonymity services (e.g. e-payment, e-voting). Therefore, it is essential to give a theoretically based but also practically usable objective numerical measure for the provided level of anonymity. In this paper two entropy-based anonymity measures will be analyzed and some shortcomings of these methods will be highlighted. Finally, source- and destination-hiding properties will be introduced for so-called local anonymity, an aspect reflecting the point of view of the users},
  10295         www_section = {anonymity, anonymity measurement},
  10296         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.7843},
  10297         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/THV04.pdf},
  10298         author = {Gergely T{\'o}th and Zolt{\'a}n Horn{\'a}k and Ferenc Vajda},
  10299         editor = {Sanna Liimatainen and Teemupekka Virtanen}
  10300 }
  10301 @article {1015507,
  10302         title = {Mercury: supporting scalable multi-attribute range queries},
  10303         journal = {SIGCOMM Comput. Commun. Rev},
  10304         volume = {34},
  10305         number = {4},
  10306         year = {2004},
  10307         pages = {353--366},
  10308         publisher = {ACM},
  10309         address = {New York, NY, USA},
  10310         abstract = {This paper presents the design of Mercury, a scalable protocol for supporting multi-attribute range-based searches. Mercury differs from previous range-based query systems in that it supports multiple attributes as well as performs explicit load balancing. To guarantee efficient routing and load balancing, Mercury uses novel light-weight sampling mechanisms for uniformly sampling random nodes in a highly dynamic overlay network. Our evaluation shows that Mercury is able to achieve its goals of logarithmic-hop routing and near-uniform load balancing. We also show that Mercury can be used to solve a key problem for an important class of distributed applications: distributed state maintenance for distributed games. We show that the Mercury-based solution is easy to use, and that it reduces the game's messaging overhead significantly compared to a na{\"\i}ve approach},
  10311         www_section = {distributed hash table, load balancing, mercury, P2P, random sampling, range queries},
  10312         issn = {0146-4833},
  10313         doi = {10.1145/1030194.1015507},
  10314         url = {http://portal.acm.org/citation.cfm?id=1030194.1015507$\#$},
  10315         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p625-bharambe1.pdf},
  10316         author = {Bharambe, Ashwin R. and Agrawal, Mukesh and Seshan, Srinivasan}
  10317 }
  10318 @conference {danezis:wpes2004,
  10319         title = {Minx: A simple and efficient anonymous packet format},
  10320         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2004)},
  10321         year = {2004},
  10322         month = {October},
  10323         publisher = {ACM  New York, NY, USA},
  10324         organization = {ACM  New York, NY, USA},
  10325         address = {Washington, DC, USA},
  10326         abstract = {Minx is a cryptographic message format for encoding anonymous messages, relayed through a network of Chaumian mixes. It provides security against a passive adversary by completely hiding correspondences between input and output messages. Possibly corrupt mixes on the message path gain no information about the route length or the position of the mix on the route. Most importantly Minx resists active attackers that are prepared to modify messages in order to embed tags which they will try to detect elsewhere in the network. The proposed scheme imposes a low communication and computational overhead, and only combines well understood cryptographic primitives},
  10327         www_section = {anonymity, tagging attack},
  10328         isbn = {1-58113-968-3},
  10329         doi = {10.1145/1029179.1029198},
  10330         url = {http://portal.acm.org/citation.cfm?id=1029179.1029198},
  10331         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-wpes2004.pdf},
  10332         author = {George Danezis and Ben Laurie}
  10333 }
  10334 @conference {Qiu:2004:MPA:1015467.1015508,
  10335         title = {Modeling and performance analysis of BitTorrent-like peer-to-peer networks},
  10336         booktitle = {SIGCOMM'04. Proceedings of the 2004 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications},
  10337         series = {SIGCOMM '04},
  10338         year = {2004},
  10339         month = aug,
  10340         pages = {367--378},
  10341         publisher = {ACM},
  10342         organization = {ACM},
  10343         address = {Portland, Oregon, USA},
  10344         abstract = {In this paper, we develop simple models to study the performance of BitTorrent, a second generation peer-to-peer (P2P) application. We first present a simple fluid model and study the scalability, performance and efficiency of such a file-sharing mechanism. We then consider the built-in incentive mechanism of BitTorrent and study its effect on network performance. We also provide numerical results based on both simulations and real traces obtained from the Internet},
  10345         www_section = {fluid model, game theory, peer-to-peer networking},
  10346         isbn = {1-58113-862-8},
  10347         doi = {10.1145/1015467.1015508},
  10348         url = {http://doi.acm.org/10.1145/1015467.1015508},
  10349         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2704\%20-\%20Qui\%20\%26\%20Srikant\%20-\%20Modeling\%20and\%20performance\%20analysis.pdf},
  10350         author = {Qiu, Dongyu and Rayadurgam Srikant}
  10351 }
  10352 @conference {Aberer04multifacetedsimultaneous,
  10353         title = {Multifaceted Simultaneous Load Balancing in DHT-based P2P systems: A new game with old balls and bins},
  10354         booktitle = {Self-* Properties in Complex Information Systems, {\textquotedblleft}Hot Topics{\textquotedblright} series, LNCS},
  10355         year = {2004},
  10356         publisher = {Springer},
  10357         organization = {Springer},
  10358         abstract = {In this paper we present and evaluate uncoordinated on-line algorithms for simultaneous storage and replication load-balancing in DHT-based peer-to-peer systems. We compare our approach with the classical balls into bins model, and point out the similarities but also the differences which call for new load-balancing mechanisms specifically targeted at P2P systems. Some of the peculiarities of P2P systems, which make our problem even more challenging, are that both the network membership and the data indexed in the network are dynamic, there is neither global coordination nor global information to rely on, and the load-balancing mechanism ideally should not compromise the structural properties and thus the search efficiency of the DHT, while preserving the semantic information of the data (e.g., lexicographic ordering to enable range searches)},
  10359         www_section = {distributed hash table, P2P, storage},
  10360         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.3746},
  10361         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/self-star-load-balance.pdf},
  10362         author = {Karl Aberer and Anwitaman Datta and Manfred Hauswirth}
  10363 }
  10364 @conference {DBLP:conf/infocom/ChandraBB04,
  10365         title = {MultiNet: Connecting to Multiple IEEE 802.11 Networks Using a Single Wireless Card},
  10366         booktitle = {INFOCOM},
  10367         year = {2004},
  10368         abstract = {There are a number of scenarios where it is desirable to have a wireless device connect to multiple networks simultaneously. Currently, this is possible only by using multiple wireless network cards in the device. Unfortunately, using multiple wireless cards causes excessive energy drain and consequent reduction of lifetime in battery operated devices. In this paper, we propose a software based approach, called MultiNet, that facilitates simultaneous connections to multiple networks by virtualizing a single wireless card. The wireless card is virtualized by introducing an intermediate layer below IP, which continuously switches the card across multiple networks. The goal of the switching algorithm is to be transparent to the user who sees her machine as being connected to multiple networks. We present the design, implementation, and performance of the MultiNet system. We analyze and evaluate buffering and switching algorithms in terms of delay and energy consumption. Our system has been operational for over twelve months, it is agnostic of the upper layer protocols, and works well over popular IEEE 802.11 wireless LAN cards},
  10369         url = {http://www.pubzone.org/dblp/conf/infocom/ChandraBB04},
  10370         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/18_3.PDF},
  10371         author = {Ranveer Chandra and Victor Bahl and Pradeep Bahl}
  10372 }
  10373 @conference {Kleinberg:2004:NFD:982792.982803,
  10374         title = {Network failure detection and graph connectivity},
  10375         booktitle = {SODA'04--Proceedings of the Fifteenth Annual ACM-SIAM Symposium on Discrete Algorithms},
  10376         series = {SODA '04},
  10377         year = {2004},
  10378         month = jan,
  10379         pages = {76--85},
  10380         publisher = {Society for Industrial and Applied Mathematics},
  10381         organization = {Society for Industrial and Applied Mathematics},
  10382         address = {New Orleans, Louisiana},
  10383         abstract = {We consider a model for monitoring the connectivity of a network subject to node or edge failures. In particular, we are concerned with detecting ({\epsilon}, k)-failures: events in which an adversary deletes up to k network elements (nodes or edges), after which there are two sets of nodes A and B, each at least an {\epsilon} fraction of the network, that are disconnected from one another. We say that a set D of nodes is an ({\epsilon}, k)-detection set if, for any ({\epsilon}, k)-failure of the network, some two nodes in D are no longer able to communicate; in this way, D "witnesses" any such failure. Recent results show that for any graph G, there is an ({\epsilon}, k)-detection set of size bounded by a polynomial in k and {\epsilon}, independent of the size of G. In this paper, we expose some relationships between bounds on detection sets and the edge-connectivity {\lambda} and node-connectivity {\kappa} of the underlying graph. Specifically, we show that detection set bounds can be made considerably stronger when parameterized by these connectivity values. We show that for an adversary that can delete {\kappa}{\lambda} edges, there is always a detection set of size O(({\kappa}/{\epsilon}) log (1/{\epsilon})) which can be found by random sampling. Moreover, an ({\epsilon}, {\lambda})-detection set of minimum size (which is at most 1/{\epsilon}) can be computed in polynomial time. A crucial point is that these bounds are independent not just of the size of G but also of the value of {\lambda}. Extending these bounds to node failures is much more challenging. The most technically difficult result of this paper is that a random sample of O(({\kappa}/{\epsilon}) log (1/{\epsilon})) nodes is a detection set for adversaries that can delete a number of nodes up to {\kappa}, the node-connectivity. For the case of edge failures we use VC-dimension techniques and the cactus representation of all minimum edge-cuts of a graph; for node failures, we develop a novel approach for working with the much more complex set of all minimum node-cuts of a graph},
  10384         www_section = {failure detection, graph connectivity, network},
  10385         isbn = {0-89871-558-X},
  10386         url = {http://dl.acm.org/citation.cfm?id=982792.982803},
  10387         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SODA\%2704\%20-\%20Network\%20failure\%20detection\%20and\%20graph\%20connectivity\%250A.pdf},
  10388         author = {Kleinberg, Jon and Sandler, Mark and Slivkins, Aleksandrs}
  10389 }
  10390 @conference {Ng:2004:NPS:1247415.1247426,
  10391         title = {A Network Positioning System for the Internet},
  10392         booktitle = {ATEC'04. Proceedings of the Annual Conference on USENIX Annual Technical Conference},
  10393         series = {ATEC '04},
  10394         year = {2004},
  10395         month = jun,
  10396         pages = {11--11},
  10397         publisher = {USENIX Association},
  10398         organization = {USENIX Association},
  10399         address = {Boston, Massachusetts, USA},
  10400         abstract = {Network positioning has recently been demonstrated to be a viable concept to represent the network distance relationships among Internet end hosts. Several subsequent studies have examined the potential benefits of using network position in applications, and proposed alternative network positioning algorithms. In this paper, we study the problem of designing and building a network positioning system (NPS). We identify several key system-building issues such as the consistency, adaptivity and stability of host network positions over time. We propose a hierarchical network positioning architecture that maintains consistency while enabling decentralization, a set of adaptive decentralized algorithms to compute and maintain accurate, stable network positions, and finally present a prototype system deployed on PlanetLab nodes that can be used by a variety of applications. We believe our system is a viable first step to provide a network positioning capability in the Internet},
  10401         www_section = {Internet, network positioning algorithms, network positioning system, nps},
  10402         url = {http://dl.acm.org/citation.cfm?id=1247415.1247426},
  10403         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ATEC\%2704\%20-\%20A\%20network\%20positioning\%20system.pdf},
  10404         author = {Ng, T. S. Eugene and Zhang, Hui}
  10405 }
  10406 @conference {1251194,
  10407         title = {Operating system support for planetary-scale network services},
  10408         booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation},
  10409         year = {2004},
  10410         pages = {19--19},
  10411         publisher = {USENIX Association},
  10412         organization = {USENIX Association},
  10413         address = {Berkeley, CA, USA},
  10414         abstract = {PlanetLab is a geographically distributed overlay network designed to support the deployment and evaluation of planetary-scale network services. Two high-level goals shape its design. First, to enable a large research community to share the infrastructure, PlanetLab provides distributed virtualization, whereby each service runs in an isolated slice of PlanetLab's global resources. Second, to support competition among multiple network services, PlanetLab decouples the operating system running on each node from the network-wide services that define PlanetLab, a principle referred to as unbundled management. This paper describes how PlanetLab realizes the goals of distributed virtualization and unbundled management, with a focus on the OS running on each node},
  10415         www_section = {overlay networks},
  10416         url = {http://portal.acm.org/citation.cfm?id=1251175.1251194$\#$},
  10417         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/plos_nsdi_04.pdf},
  10418         author = {Bavier, Andy and Bowman, Mic and Chun, Brent and Culler, David and Karlin, Scott and Muir, Steve and Peterson, Larry and Roscoe, Timothy and Spalink, Tammo and Wawrzoniak, Mike}
  10419 }
  10420 @conference {golle:ccs2004,
  10421         title = {Parallel Mixing},
  10422         booktitle = {Proceedings of the 11th ACM Conference on Computer and Communications Security (CCS 2004)},
  10423         year = {2004},
  10424         month = {October},
  10425         publisher = {ACM Press},
  10426         organization = {ACM Press},
  10427         address = {Washington DC, USA},
  10428         abstract = {Efforts to design faster synchronous mix networks have focused on reducing the computational cost of mixing per server. We propose a different approach: our re-encryption mixnet allows servers to mix inputs in parallel. The result is a dramatic reduction in overall mixing time for moderate-to-large numbers of servers. As measured in the model we describe, for n inputs and $M$ servers our parallel re-encryption mixnet produces output in time at most 2n -- and only around n assuming a majority of honest servers. In contrast, a traditional, sequential, synchronous re-encryption mixnet requires time Mn.
  10429 
  10430 Parallel re-encryption mixnets offer security guarantees comparable to those of synchronous mixnets, and in many cases only a slightly weaker guarantee of privacy. Our proposed construction is applicable to many recently proposed re-encryption mixnets, such as those of Furukawa and Sako, Neff, Jakobsson et al., and Golle and Boneh. In practice, parallel mixnets promise a potentially substantial time saving in applications such as anonymous electronic elections},
  10431         www_section = {anonymity, privacy},
  10432         isbn = {1-58113-961-6},
  10433         doi = {10.1145/1030083.1030113},
  10434         url = {http://portal.acm.org/citation.cfm?id=1030113},
  10435         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-ccs2004.pdf},
  10436         author = {Philippe Golle and Ari Juels}
  10437 }
  10438 @booklet {Fakult04peerstore:better,
  10439         title = {PeerStore: Better Performance by Relaxing in Peer-to-Peer Backup},
  10440         year = {2004},
  10441         abstract = {Backup is cumbersome. To be effective, backups have to be made at regular intervals, forcing users to organize and store a growing collection of backup media. In this paper we propose a novel Peer-to-Peer backup system, PeerStore, that allows the user to store his backups on other people's computers instead. PeerStore is an adaptive, cost-effective system suitable for all types of networks ranging from LAN, WAN to large unstable networks like the Internet. The system consists of two layers: metadata layer and symmetric trading layer. Locating blocks and duplicate checking is accomplished by the metadata layer while the actual data distribution is done between pairs of peers after they have established a symmetric data trade. By decoupling the metadata management from data storage, the system offers a significant reduction of the maintenance cost and preserves fairness among peers. Results show that PeerStore has a reduced maintenance cost compared to pStore. PeerStore also realizes fairness because of the symmetric nature of the trades},
  10442         www_section = {backup, P2P},
  10443         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.8067},
  10444         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/peerstore-better-performance-by.pdf},
  10445         author = {Martin Landers and Han Zhang and Kian-Lee Tan}
  10446 }
  10447 @conference {2004_8,
  10448         title = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks},
  10449         booktitle = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks},
  10450         year = {2004},
  10451         abstract = {File sharing in wireless ad-hoc networks in a peer-to-peer manner imposes many challenges that make conventional peer-to-peer systems operating on wire-line networks inapplicable for this case. Information and workload distribution as well as routing are major problems for members of a wireless ad-hoc network, which are only aware of their neighborhood. In this paper we propose a system that solves the peer-to-peer file-sharing problem for wireless ad-hoc networks. Our system works according to peer-to-peer principles, without requiring a central server, and distributes information regarding the location of shared files among members of the network. By means of a {\textquotedblleft}hashline{\textquotedblright} and forming a tree-structure based on the topology of the network, the system is able to answer location queries, and also discover and maintain routing information that is used to transfer files from a source-peer to another peer},
  10452         author = {unknown},
  10453         www_section = {ad-hoc networks, file systems, P2P},
  10454         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.124.9928.pdf},
  10455         editor = {Hasan S{\"o}zer and Metin Kekkalmaz and Ibrahim K{\"o}rpeoglu}
  10456 }
  10457 @article {2004_9,
  10458         title = {Peer-to-Peer Networking \& -Computing},
  10459         journal = {Informatik Spektrum},
  10460         volume = {27},
  10461         year = {2004},
  10462         month = feb,
  10463         pages = {51--54},
  10464         abstract = {Under the term peer-to-peer, a highly interesting paradigm for communication on the Internet is establishing itself. Although originally designed only for the very pragmatic and legally controversial file-sharing services, peer-to-peer mechanisms can be used for the distributed sharing of a wide variety of resources and open up new possibilities for Internet-based applications},
  10465         www_section = {computing, networking, peer-to-peer networking},
  10466         doi = {10.1007/s00287-003-0362-9},
  10467         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Informatik\%20Spektrum\%20-\%20Peer-to-peer\%20networking\%20\%26\%20-computing.pdf},
  10468         author = {Ralf Steinmetz and Klaus Wehrle}
  10469 }
  10470 @conference {Cramer04LifeScience,
  10471         title = {Peer-to-Peer Overlays and Data Integration in a Life Science Grid},
  10472         booktitle = {Proceedings of the First International Workshop of the EU Network of Excellence DELOS on Digital Library Architectures},
  10473         year = {2004},
  10474         pages = {127--138},
  10475         type = {publication},
  10476         address = {Cagliari, Italy},
  10477         abstract = {Databases and Grid computing are a good match. With the service orientation of Grid computing, the complexity of maintaining and integrating databases can be kept away from the actual users. Data access and integration is performed via services, which also allow to employ an access control. While it is our perception that many proposed Grid applications rely on a centralized and static infrastructure, Peer-to-Peer (P2P) technologies might help to dynamically scale and enhance Grid applications. The focus does not lie on publicly available P2P networks here, but on the self-organizing capabilities of P2P networks in general. A P2P overlay could, e.g., be used to improve the distribution of queries in a data Grid. For studying the combination of these three technologies, Grid computing, databases, and P2P, in this paper, we use an existing application from the life sciences, drug target validation, as an example. In its current form, this system has several drawbacks. We believe that they can be alleviated by using a combination of the service-based architecture of Grid computing and P2P technologies for implementing the services. The work presented in this paper is in progress. We mainly focus on the description of the current system state, its problems and the proposed new architecture. For a better understanding, we also outline the main topics related to the work presented here},
  10478         www_section = {GRID, overlay networks, P2P},
  10479         url = {http://i30www.ira.uka.de/research/publications/p2p/},
  10480         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04lifescience.pdf},
  10481         author = {Cramer, Curt and Andrea Schafferhans and Thomas Fuhrmann}
  10482 }
  10483 @article {2004_10,
  10484         title = {Personalized Web search for improving retrieval effectiveness},
  10485         journal = {Knowledge and Data Engineering, IEEE Transactions on},
  10486         volume = {16},
  10487         year = {2004},
  10488         month = jan,
  10489         pages = {28--40},
  10490         abstract = {Current Web search engines are built to serve all users, independent of the special needs of any individual user. Personalization of Web search is to carry out retrieval for each user incorporating his/her interests. We propose a novel technique to learn user profiles from users' search histories. The user profiles are then used to improve retrieval effectiveness in Web search. A user profile and a general profile are learned from the user's search history and a category hierarchy, respectively. These two profiles are combined to map a user query into a set of categories which represent the user's search intention and serve as a context to disambiguate the words in the user's query. Web search is conducted based on both the user query and the set of categories. Several profile learning and category mapping algorithms and a fusion algorithm are provided and evaluated. Experimental results indicate that our technique to personalize Web search is both effective and efficient},
  10491         www_section = {BANDWIDTH, category hierarchy, category mapping algorithms, Displays, fusion algorithm, History, human factors, information filtering, information retrieval, libraries, personalized Web search, profile learning, retrieval effectiveness, search engines, search intention, special needs, user interfaces, user profiles, user search histories, Web search, Web search engines},
  10492         issn = {1041-4347},
  10493         doi = {10.1109/TKDE.2004.1264820},
  10494         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedWebSearch2004Liu.pdf},
  10495         author = {Fang Liu and Yu, C. and Weiyi Meng}
  10496 }
  10497 @booklet {2004_11,
  10498         title = {POSIX--Portable Operating System Interface},
  10499         journal = {The Open Group Technical Standard Base Specifications, Issue 6},
  10500         number = {IEEE Std 1003.n },
  10501         year = {2004},
  10502         www_section = {API, asynchronous, built-in utility, CPU, file access control mechanism, input/output (I/O), job control, network, portable operating system interface (POSIX), shell, stream, synchronous},
  10503         url = {http://pubs.opengroup.org/onlinepubs/009695399/},
  10504         author = {The Open Group and IEEE}
  10505 }
  10506 @conference {morphmix-fc2004,
  10507         title = {Practical Anonymity for the Masses with MorphMix},
  10508         booktitle = {Proceedings of Financial Cryptography (FC '04)},
  10509         year = {2004},
  10510         month = feb,
  10511         pages = {233--250},
  10512         publisher = {Springer-Verlag, LNCS 3110},
  10513         organization = {Springer-Verlag, LNCS 3110},
  10514         abstract = {MorphMix is a peer-to-peer circuit-based mix network to provide practical anonymous low-latency Internet access for millions of users. The basic ideas of MorphMix have been published before; this paper focuses on solving open problems and giving an analysis of the resistance to attacks and the performance it offers assuming realistic scenarios with very many users. We demonstrate that MorphMix scales very well and can support as many nodes as there are public IP addresses. In addition, we show that MorphMix is indeed practical because it provides good resistance from long-term profiling and offers acceptable performance despite the heterogeneity of the nodes and the fact that nodes can join or leave the system at any time},
  10515         www_section = {anonymity, P2P},
  10516         isbn = {978-3-540-22420-4},
  10517         doi = {10.1007/b98935},
  10518         url = {http://www.springerlink.com/content/dc1qn54t9ta4u3g1/},
  10519         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-fc2004.pdf},
  10520         author = {Marc Rennhard and Bernhard Plattner},
  10521         editor = {Ari Juels}
  10522 }
  10523 @article {Cox:2004:PDN:972374.972394,
  10524         title = {Practical, distributed network coordinates},
  10525         journal = {SIGCOMM Computer Communication Review},
  10526         volume = {34},
  10527         year = {2004},
  10528         month = jan,
  10529         pages = {113--118},
  10530         publisher = {ACM},
  10531         address = {New York, NY, USA},
  10532         abstract = {Vivaldi is a distributed algorithm that assigns synthetic coordinates to internet hosts, so that the Euclidean distance between two hosts' coordinates predicts the network latency between them. Each node in Vivaldi computes its coordinates by simulating its position in a network of physical springs. Vivaldi is both distributed and efficient: no fixed infrastructure need be deployed and a new host can compute useful coordinates after collecting latency information from only a few other hosts. Vivaldi can rely on piggy-backing latency information on application traffic instead of generating extra traffic by sending its own probe packets. This paper evaluates Vivaldi through simulations of 750 hosts, with a matrix of inter-host latencies derived from measurements between 750 real Internet hosts. Vivaldi finds synthetic coordinates that predict the measured latencies with a median relative error of 14 percent. The simulations show that a new host joining an existing Vivaldi system requires fewer than 10 probes to achieve this accuracy. Vivaldi is currently used by the Chord distributed hash table to perform proximity routing, replica selection, and retransmission timer estimation},
  10533         www_section = {network coordinates, proximity routing, replica selection, retransmission timer estimation, Vivaldi},
  10534         issn = {0146-4833},
  10535         doi = {http://doi.acm.org/10.1145/972374.972394},
  10536         url = {http://doi.acm.org/10.1145/972374.972394},
  10537         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Practical\%2C\%20distributed\%20network\%20coordinates.pdf},
  10538         author = {Russ Cox and Dabek, Frank and Frans M. Kaashoek and Li, Jinyang and Robert Morris}
  10539 }
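% The Vivaldi entry above describes nodes computing network coordinates by
% simulating their position in a network of physical springs. The following is
% an illustrative, simplified sketch of that idea (not code from the paper;
% function names and the step size delta=0.25 are assumptions), kept as a
% BibTeX comment so the file remains machine-readable:
%
%   # Python sketch: one spring-relaxation update of a node's coordinate,
%   # nudging the Euclidean distance toward the measured round-trip time.
%   def vivaldi_update(my_coord, peer_coord, measured_rtt, delta=0.25):
%       diff = [a - b for a, b in zip(my_coord, peer_coord)]
%       dist = sum(d * d for d in diff) ** 0.5 or 1e-9
%       error = measured_rtt - dist          # > 0: too close, push away from peer
%       unit = [d / dist for d in diff]
%       return [c + delta * error * u for c, u in zip(my_coord, unit)]
%
%   # Repeated updates against a single peer converge toward the measured latency.
%   coord = [0.0, 0.0]
%   for _ in range(50):
%       coord = vivaldi_update(coord, [1.0, 1.0], measured_rtt=10.0)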
  10540 @conference {e2e-traffic,
  10541         title = {Practical Traffic Analysis: Extending and Resisting Statistical Disclosure},
  10542         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10543         series = {LNCS},
  10544         volume = {3424},
  10545         year = {2004},
  10546         month = {May},
  10547         pages = {17--34},
  10548         publisher = {Springer Berlin / Heidelberg},
  10549         organization = {Springer Berlin / Heidelberg},
  10550         abstract = {We extend earlier research on mounting and resisting passive long-term end-to-end traffic analysis attacks against anonymous message systems, by describing how an eavesdropper can learn sender-receiver connections even when the substrate is a network of pool mixes, the attacker is non-global, and senders have complex behavior or generate padding messages. Additionally, we describe how an attacker can use information about message distinguishability to speed the attack. We simulate our attacks for a variety of scenarios, focusing on the amount of information needed to link senders to their recipients. In each scenario, we show that the intersection attack is slowed but still succeeds against a steady-state mix network. We find that the attack takes an impractical amount of time when message delivery times are highly variable; when the attacker can observe very little of the network; and when users pad consistently and the adversary does not know how the network behaves in their absence},
  10551         www_section = {traffic analysis},
  10552         isbn = {978-3-540-26203-9},
  10553         doi = {10.1007/b136164},
  10554         url = {http://www.springerlink.com/content/v6m6cat1lxvbd4yd/},
  10555         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/e2e-traffic.pdf},
  10556         author = {Nick Mathewson and Roger Dingledine}
  10557 }
  10558 @article {Wright:2004,
  10559         title = {The Predecessor Attack: An Analysis of a Threat to Anonymous Communications Systems},
  10560         journal = {ACM Transactions on Information and System Security (TISSEC)},
  10561         volume = {7},
  10562         number = {4},
  10563         year = {2004},
  10564         month = {November},
  10565         pages = {489--522},
  10566         abstract = {There have been a number of protocols proposed for anonymous network communication. In this paper, we investigate attacks by corrupt group members that degrade the anonymity of each protocol over time. We prove that when a particular initiator continues communication with a particular responder across path reformations, existing protocols are subject to the attack. We use this result to place an upper bound on how long existing protocols, including Crowds, Onion Routing, Hordes, Web Mixes, and DC-Net, can maintain anonymity in the face of the attacks described. This provides a basis for comparing these protocols against each other. Our results show that fully connected DC-Net is the most resilient to these attacks, but it suffers from scalability issues that keep anonymity group sizes small. We also show through simulation that the underlying topography of the DC-Net affects the resilience of the protocol: as the number of neighbors a node has increases, the strength of the protocol increases, at the cost of higher communication overhead},
  10567         www_section = {anonymity, predecessor attack, privacy},
  10568         issn = {1094-9224},
  10569         doi = {10.1145/1042031.1042032},
  10570         url = {http://portal.acm.org/citation.cfm?id=1042031.1042032\&coll=GUIDE\&dl=GUIDE\&CFID=76057600\&CFTOKEN=15386893},
  10571         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Wright-2004.pdf},
  10572         author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}
  10573 }
  10574 @booklet {Acquisti04privacyin,
  10575         title = {Privacy in Electronic Commerce and the Economics of Immediate Gratification},
  10576         year = {2004},
  10577         abstract = {Dichotomies between privacy attitudes and behavior have been noted in the literature but not yet fully explained. We apply lessons from the research on behavioral economics to understand the individual decision making process with respect to privacy in electronic commerce. We show that it is unrealistic to expect individual rationality in this context. Models of self-control problems and immediate gratification offer more realistic descriptions of the decision process and are more consistent with currently available data. In particular, we show why individuals who may genuinely want to protect their privacy might not do so because of psychological distortions well documented in the behavioral literature; we show that these distortions may affect not only {\textquoteleft}na{\"\i}ve' individuals but also {\textquoteleft}sophisticated' ones; and we prove that this may occur also when individuals perceive the risks from not protecting their privacy as significant},
  10578         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.3760\&rep=rep1\&type=pdf},
  10579         author = {Alessandro Acquisti}
  10580 }
  10581 @conference {Atallah:2004:PCF:1029179.1029204,
  10582         title = {Private collaborative forecasting and benchmarking},
  10583         booktitle = {WPES'04--Proceedings of the 2004 ACM workshop on Privacy in the electronic society},
  10584         series = {WPES '04},
  10585         year = {2004},
  10586         month = oct,
  10587         pages = {103--114},
  10588         publisher = {ACM},
  10589         organization = {ACM},
  10590         address = {Washington, DC, USA},
  10591         abstract = {Suppose a number of hospitals in a geographic area want to learn how their own heart-surgery unit is doing compared with the others in terms of mortality rates, subsequent complications, or any other quality metric. Similarly, a number of small businesses might want to use their recent point-of-sales data to cooperatively forecast future demand and thus make more informed decisions about inventory, capacity, employment, etc. These are simple examples of cooperative benchmarking and (respectively) forecasting that would benefit all participants as well as the public at large, as they would make it possible for participants to avail themselves of more precise and reliable data collected from many sources, to assess their own local performance in comparison to global trends, and to avoid many of the inefficiencies that currently arise because of having less information available for their decision-making. And yet, in spite of all these advantages, cooperative benchmarking and forecasting typically do not take place, because of the participants' unwillingness to share their information with others. Their reluctance to share is quite rational, and is due to fears of embarrassment, lawsuits, weakening their negotiating position (e.g., in case of over-capacity), revealing corporate performance and strategies, etc. The development and deployment of \emph{private} benchmarking and forecasting technologies would allow such collaborations to take place without revealing any participant's data to the others, reaping the benefits of collaboration while avoiding the drawbacks. Moreover, this kind of technology would empower smaller organizations who could then cooperatively base their decisions on a much broader information base, in a way that is today restricted to only the largest corporations. This paper is a step towards this goal, as it gives protocols for forecasting and benchmarking that reveal to the participants the desired answers yet do not reveal to any participant any other participant's private data. We consider several forecasting methods, including linear regression and time series techniques such as moving average and exponential smoothing. One of the novel parts of this work, that further distinguishes it from previous work in secure multi-party computation, is that it involves floating point arithmetic, in particular it provides protocols to securely and efficiently perform division},
  10592         www_section = {benchmarking, e-commerce, forecasting, privacy, secure multi-party computation, secure protocol, SMC},
  10593         isbn = {1-58113-968-3},
  10594         doi = {10.1145/1029179.1029204},
  10595         url = {http://doi.acm.org/10.1145/1029179.1029204},
  10596         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2704\%20-\%20Forecasting\%20and\%20benchamking.pdf},
  10597         author = {Atallah, Mikhail and Bykova, Marina and Li, Jiangtao and Frikken, Keith and Topkara, Mercan}
  10598 }
  10599 @article {kissner04private,
  10600         title = {Private keyword-based push and pull with applications to anonymous communication},
  10601         journal = {Applied Cryptography and Network Security},
  10602         year = {2004},
  10603         abstract = {We propose a new keyword-based Private Information Retrieval (PIR) model that allows private modification of the database from which information is requested. In our model, the database is distributed over n servers, any one of which can act as a transparent interface for clients. We present protocols that support operations for accessing data, focusing on privately appending labelled records to the database (push) and privately retrieving the next unseen record appended under a given label (pull). The communication complexity between the client and servers is independent of the number of records in the database (or more generally, the number of previous push and pull operations) and of the number of servers. Our scheme also supports access control oblivious to the database servers by implicitly including a public key in each push, so that only the party holding the private key can retrieve the record via pull. To our knowledge, this is the first system that achieves the following properties: private database modification, private retrieval of multiple records with the same keyword, and oblivious access control. We also provide a number of extensions to our protocols and, as a demonstrative application, an unlinkable anonymous communication service using them},
  10604         www_section = {distributed database, private information retrieval, private key, public key cryptography},
  10605         isbn = {3-540-22217-0},
  10606         issn = {0302-9743 },
  10607         url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=15852065},
  10608         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kissner04private.pdf},
  10609         author = {Lea Kissner and Alina Oprea and Michael K. Reiter and Dawn Xiaodong Song and Ke Yang}
  10610 }
  10611 @conference {2004_12,
  10612         title = {A Probabilistic Approach to Predict Peers' Performance in P2P Networks},
  10613         booktitle = {CIA 2004. Cooperative Information Agents VIII, 8th International Workshop},
  10614         series = {Lecture Notes in Computer Science},
  10615         volume = {3191},
  10616         year = {2004},
  10617         month = sep,
  10618         pages = {62--76},
  10619         publisher = {Springer},
  10620         organization = {Springer},
  10621         address = {Erfurt, Germany},
  10622         abstract = {The problem of encouraging trustworthy behavior in P2P online communities by managing peers' reputations has drawn a lot of attention recently. However, most of the proposed solutions exhibit the following two problems: huge implementation overhead and unclear trust related model semantics. In this paper we show that a simple probabilistic technique, namely maximum likelihood estimation, can reduce these two problems substantially when employed as the feedback aggregation strategy. Thus, no complex exploration of the feedback is necessary. Instead, simple, intuitive and efficient probabilistic estimation methods suffice},
  10623         www_section = {p2p network, peer performance},
  10624         doi = {10.1007/978-3-540-30104-2_6},
  10625         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CIA\%2704\%20-\%20Despotovic\%20\%26\%20Aberer\%20-\%20Peers\%27\%20performance\%20in\%20P2P\%20networks.pdf},
  10626         author = {Zoran Despotovic and Karl Aberer}
  10627 }
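% The entry above (Despotovic & Aberer) aggregates peer feedback with maximum
% likelihood estimation. Below is an illustrative, hedged sketch of such an
% aggregation step (not the paper's exact model; the grid search, the reporter
% lying probabilities and all names are assumptions), kept as a BibTeX comment:
%
%   # Python sketch: estimate theta, the probability that a peer behaves honestly,
%   # from binary reports made by witnesses who each lie with a known probability.
%   import math
%
%   def mle_trust(reports):
%       # reports: list of (report_is_positive, reporter_lie_probability)
%       best_theta, best_ll = 0.0, float("-inf")
%       for step in range(101):                      # grid search over theta
%           theta = step / 100.0
%           ll = 0.0
%           for positive, lie_prob in reports:
%               p_positive = theta * (1 - lie_prob) + (1 - theta) * lie_prob
%               p = p_positive if positive else 1 - p_positive
%               ll += math.log(max(p, 1e-12))
%           if ll > best_ll:
%               best_theta, best_ll = theta, ll
%       return best_theta
%
%   # Example: mostly positive reports from fairly truthful witnesses.
%   print(mle_trust([(True, 0.1), (True, 0.2), (False, 0.1), (True, 0.3)]))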
  10628 @article {crowds-model,
  10629         title = {Probabilistic Model Checking of an Anonymity System},
  10630         journal = {Journal of Computer Security},
  10631         volume = {12},
  10632         number = {3-4},
  10633         year = {2004},
  10634         pages = {355--377},
  10635         abstract = {We use the probabilistic model checker PRISM to analyze the Crowds system for anonymous Web browsing. This case study demonstrates how probabilistic model checking techniques can be used to formally analyze security properties of a peer-to-peer group communication system based on random message routing among members. The behavior of group members and the adversary is modeled as a discrete-time Markov chain, and the desired security properties are expressed as PCTL formulas. The PRISM model checker is used to perform automated analysis of the system and verify anonymity guarantees it provides. Our main result is a demonstration of how certain forms of probabilistic anonymity degrade when group size increases or random routing paths are rebuilt, assuming that the corrupt group members are able to identify and/or correlate multiple routing paths originating from the same sender},
  10636         www_section = {anonymity, P2P, routing},
  10637         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.6570},
  10638         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shmat_crowds.pdf},
  10639         author = {Vitaly Shmatikov}
  10640 }
  10641 @conference {berman-fc2004,
  10642         title = {Provable Unlinkability Against Traffic Analysis},
  10643         booktitle = {Proceedings of Financial Cryptography (FC '04)},
  10644         year = {2004},
  10645         month = feb,
  10646         pages = {266--280},
  10647         publisher = {Springer-Verlag, LNCS 3110},
  10648         organization = {Springer-Verlag, LNCS 3110},
  10649         abstract = {We consider unlinkability of communication problem: given n users, each sending a message to some destination, encode and route the messages so that an adversary analyzing the traffic in the communication network cannot link the senders with the recipients. A solution should have a small communication overhead, that is, the number of additional messages should be kept low.
  10650 David Chaum introduced the idea of mixes for solving this problem. His approach was developed further by Simon and Rackoff, and implemented later as the onion protocol. Even if the onion protocol is widely regarded as secure and used in practice, formal arguments supporting this claim are rare and far from being complete. On top of that, in certain scenarios very simple tricks suffice to break security without breaking the cryptographic primitives. It turns out that one source of difficulties in analyzing the onion protocol's security is the adversary model. In a recent work, Berman, Fiat and Ta-Shma develop a new and more realistic model in which only a constant fraction of communication lines can be accessed by an adversary, the number of messages does not need to be high and the preferences of the users are taken into account. For this model they prove that with high probability a good level of unlinkability is obtained after  steps of the onion protocol where n is the number of messages sent.
  10651 In this paper we improve these results: we show that the same level of unlinkability (expressed as variation distance between certain probability distributions) is obtained with high probability already after  steps of the onion protocol. Asymptotically, this is the best result possible, since obviously (log n) steps are necessary. On top of that, our analysis is much simpler. It is based on path coupling technique designed for showing rapid mixing of Markov chains},
  10652         www_section = {anonymity, Markov chain, path coupling, rapid mixing, unlinkability},
  10653         isbn = {978-3-540-23208-7},
  10654         doi = {10.1007/b100936},
  10655         url = {http://www.springerlink.com/content/cknab9y9bpete2ha/},
  10656         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/berman-fc2004.pdf},
  10657         author = {Ron Berman and Amos Fiat and Amnon Ta-Shma},
  10658         editor = {Ari Juels}
  10659 }
  10660 @conference {Perng04providingcontent-based,
  10661         title = {Providing content-based services in a peer-to-peer environment},
  10662         booktitle = {Proceedings of the Third International Workshop on Distributed Event-Based Systems (DEBS)},
  10663         year = {2004},
  10664         pages = {74--79},
  10665         abstract = {Information dissemination in wide area networks has recently garnered much attention. Two differing models, publish/subscribe and rendezvous-based multicast atop overlay networks, have emerged as the two leading approaches for this goal. Event-based publish/subscribe supports content-based services with powerful filtering capabilities, while peer-to-peer rendezvous-based services allow for efficient communication in a dynamic network infrastructure. We describe Reach, a system that integrates these two approaches to provide efficient and scalable content-based services in a dynamic network setting},
  10666         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.4393\&rep=rep1\&type=pdf},
  10667         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/debs04perng.pdf},
  10668         author = {Ginger Perng and Chenxi Wang and Michael K. Reiter}
  10669 }
  10670 @conference { boneh04publickey,
  10671         title = {Public-key encryption with keyword search},
  10672         booktitle = {Eurocrypt 2004},
  10673         year = {2004},
  10674         month = jan,
  10675         publisher = {Springer-Verlag},
  10676         organization = {Springer-Verlag},
  10677         abstract = {We study the problem of searching on data that is encrypted using a public key system. Consider user Bob who sends email to user Alice encrypted under Alice's public key. An email gateway wants to test whether the email contains the keyword "urgent" so that it could route the email accordingly. Alice, on the other hand does not wish to give the gateway the ability to decrypt all her messages. We define and construct a mechanism that enables Alice to provide a key to the gateway that},
  10678         url = {http://citeseer.ist.psu.edu/boneh04public.html},
  10679         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/encsearch.pdf},
  10680         author = {Dan Boneh and Giovanni Di Crescenzo and Rafail Ostrovsky and Giuseppe Persiano}
  10681 }
  10682 @conference {pool-dummy04,
  10683         title = {Reasoning about the Anonymity Provided by Pool Mixes that Generate Dummy Traffic},
  10684         booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)},
  10685         series = {LNCS},
  10686         year = {2004},
  10687         month = {May},
  10688         address = {Toronto},
  10689         abstract = {In this paper we study the anonymity provided by generalized mixes that insert dummy traffic. Mixes are an essential component to offer anonymous email services. We indicate how to compute the recipient and sender anonymity and we point out some problems that may arise from the intuitive extension of the metric to take into account dummies. Two possible ways of inserting dummy traffic are discussed and compared. An active attack scenario is considered, and the anonymity provided by mixes under the attack is analyzed},
  10690         www_section = {anonymity},
  10691         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pool-dummy04.pdf},
  10692         author = {Claudia Diaz and Bart Preneel}
  10693 }
  10694 @conference {1247420,
  10695         title = {Redundancy elimination within large collections of files},
  10696         booktitle = {ATEC '04: Proceedings of the annual conference on USENIX Annual Technical Conference},
  10697         year = {2004},
  10698         pages = {5--5},
  10699         publisher = {USENIX Association},
  10700         organization = {USENIX Association},
  10701         address = {Berkeley, CA, USA},
  10702         abstract = {Ongoing advancements in technology lead to ever-increasing storage capacities. In spite of this, optimizing storage usage can still provide rich dividends. Several techniques based on delta-encoding and duplicate block suppression have been shown to reduce storage overheads, with varying requirements for resources such as computation and memory. We propose a new scheme for storage reduction that reduces data sizes with an effectiveness comparable to the more expensive techniques, but at a cost comparable to the faster but less effective ones. The scheme, called Redundancy Elimination at the Block Level (REBL), leverages the benefits of compression, duplicate block suppression, and delta-encoding to eliminate a broad spectrum of redundant data in a scalable and efficient manner. REBL generally encodes more compactly than compression (up to a factor of 14) and a combination of compression and duplicate suppression (up to a factor of 6.7). REBL also encodes similarly to a technique based on delta-encoding, reducing overall space significantly in one case. Furthermore, REBL uses super-fingerprints, a technique that reduces the data needed to identify similar blocks while dramatically reducing the computational requirements of matching the blocks: it turns O($n^2$) comparisons into hash table lookups. As a result, using super-fingerprints to avoid enumerating matching data objects decreases computation in the resemblance detection phase of REBL by up to a couple orders of magnitude},
  10703         url = {http://portal.acm.org/citation.cfm?id=1247420$\#$},
  10704         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8331.pdf},
  10705         author = {Kulkarni, Purushottam and Douglis, Fred and Jason Lavoie and Tracey, John M.}
  10706 }
  10707 @conference {Barreto04areplicated,
  10708         title = {A Replicated File System for Resource Constrained Mobile Devices},
  10709         booktitle = {Proceedings of IADIS Applied Computing},
  10710         year = {2004},
  10711         abstract = {The emergence of more powerful and resourceful mobile devices, as well as new wireless communication technologies, is turning the concept of ad-hoc networking into a viable and promising possibility for ubiquitous information sharing. However, the inherent characteristics of ad-hoc networks bring up new challenges for which most conventional systems don't provide an appropriate response. Namely, the lack of a pre-existing infrastructure, the high topological dynamism of these networks, the relatively low bandwidth of wireless links, as well as the limited storage and energy resources of mobile devices are issues that strongly affect the efficiency of any distributed system intended to provide ubiquitous information sharing. In this paper we describe Haddock-FS, a transparent replicated file system designed to support collaboration in the novel usage scenarios enabled by mobile environments. Haddock-FS is based on a highly available optimistic consistency protocol. In order to effectively cope with the network bandwidth and device memory constraints of these environments, Haddock-FS employs a limited size log truncation scheme and a cross-file, cross-version content similarity exploitation mechanism},
  10712         www_section = {ad-hoc networks, ubiquitous computing},
  10713         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.144.9141},
  10714         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.144.9141.pdf},
  10715         author = {Jo{\~a}o Barreto and Paulo Ferreira}
  10716 }
  10717 @conference {golle:pet2004,
  10718         title = {Reputable Mix Networks},
  10719         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10720         series = {LNCS},
  10721         volume = {3424},
  10722         year = {2004},
  10723         month = {May},
  10724         pages = {51--63},
  10725         abstract = {We define a new type of mix network that offers a reduced form of robustness: the mixnet can prove that every message it outputs corresponds to an input submitted by a player without revealing which input (for honest players). We call mixnets with this property reputable mixnets. Reputable mixnets are not fully robust, because they offer no guarantee that distinct outputs correspond to distinct inputs. In particular, a reputable mix may duplicate or erase messages. A reputable mixnet, however, can defend itself against charges of having authored the output messages it produces. This ability is very useful in practice, as it shields the mixnet from liability in the event that an output message is objectionable or illegal.
  10726 We propose three very efficient protocols for reputable mixnets, all synchronous. The first protocol is based on blind signatures. It works both with Chaumian decryption mixnets or re-encryption mixnets based on ElGamal, but guarantees a slightly weaker form of reputability which we call near-reputability. The other two protocols are based on ElGamal re-encryption over a composite group and offer true reputability. One requires interaction between the mixnet and the players before players submit their inputs. The other assumes no interaction prior to input submission},
  10727         www_section = {anonymity, privacy},
  10728         isbn = {978-3-540-26203-9},
  10729         doi = {10.1007/b136164},
  10730         url = {http://www.springerlink.com/content/mqpu4nyljy82ca90/},
  10731         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-pet2004.pdf},
  10732         author = {Philippe Golle}
  10733 }
  10734 @conference {Gupta:2004:RMF:1018440.1021942,
  10735         title = {Reputation Management Framework and Its Use as Currency in Large-Scale Peer-to-Peer Networks},
  10736         booktitle = {P2P'04. Proceedings of the 4th International Conference on Peer-to-Peer Computing},
  10737         series = {P2P '04},
  10738         year = {2004},
  10739         month = aug,
  10740         pages = {124--132},
  10741         publisher = {IEEE Computer Society},
  10742         organization = {IEEE Computer Society},
  10743         address = {Zurich, Switzerland},
  10744         abstract = {In this paper we propose a reputation management framework for large-scale peer-to-peer (P2P) networks, wherein all nodes are assumed to behave selfishly. The proposed framework has several advantages. It enables a form of virtual currency, such that the reputation of nodes is a measure of their wealth. The framework is scalable and provides protection against attacks by malicious nodes. The above features are achieved by developing trusted communities of nodes whose members trust each other and cooperate to deal with the problem of nodes' selfishness and possible maliciousness},
  10745         www_section = {framework, P2P, peer-to-peer networking, reputation management},
  10746         isbn = {0-7695-2156-8},
  10747         doi = {http://dx.doi.org/10.1109/P2P.2004.44},
  10748         url = {http://dx.doi.org/10.1109/P2P.2004.44},
  10749         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2704\%20-\%20Reputation\%20management\%20framework.pdf},
  10750         author = {Gupta, Rohit and Somani, Arun K.}
  10751 }
  10752 @conference {Awerbuch04robustdistributed,
  10753         title = {Robust Distributed Name Service},
  10754         booktitle = {In Proc. of the 3rd International Workshop on Peer-to-Peer Systems (IPTPS)},
  10755         year = {2004},
  10756         pages = {1--8},
  10757         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.4900},
  10758         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/awerbuch-robust.pdf},
  10759         author = {Awerbuch, Baruch}
  10760 }
  10761 @conference {Feldman:2004:RIT:988772.988788,
  10762         title = {Robust incentive techniques for peer-to-peer networks},
  10763         booktitle = {EC'04. Proceedings of the 5th ACM Conference on Electronic Commerce},
  10764         series = {EC '04},
  10765         year = {2004},
  10766         month = may,
  10767         pages = {102--111},
  10768         publisher = {ACM},
  10769         organization = {ACM},
  10770         address = {New York, NY, USA},
  10771         abstract = {Lack of cooperation (free riding) is one of the key problems that confronts today's P2P systems. What makes this problem particularly difficult is the unique set of challenges that P2P systems pose: large populations, high turnover, asymmetry of interest, collusion, zero-cost identities, and traitors. To tackle these challenges we model the P2P system using the Generalized Prisoner's Dilemma (GPD), and propose the Reciprocative decision function as the basis of a family of incentive techniques. These techniques are fully distributed and include: discriminating server selection, maxflow-based subjective reputation, and adaptive stranger policies. Through simulation, we show that these techniques can drive a system of strategic users to nearly optimal levels of cooperation},
  10772         www_section = {cheap pseudonyms, collusion, free-riding, incentives, peer-to-peer networking, prisoners dilemma, reputation, whitewash, whitewashing},
  10773         isbn = {1-58113-771-0},
  10774         doi = {http://doi.acm.org/10.1145/988772.988788},
  10775         url = {http://doi.acm.org/10.1145/988772.988788},
  10776         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2704\%20-\%20Robust\%20incentive\%20techniques\%20for\%20P2P\%20networks.pdf},
  10777         author = {Michal Feldman and Kevin Lai and Ion Stoica and John Chuang}
  10778 }
  10779 @article {2004_13,
  10780         title = {Scalable Byzantine agreement},
  10781         year = {2004},
  10782         abstract = {This paper gives a scalable protocol for solving the Byzantine agreement problem. The protocol is scalable in the sense that for Byzantine agreement over n processors, each processor sends and receives only O(log n) messages in expectation. To the best of our knowledge this is the first result for the Byzantine agreement problem where each processor sends and receives o(n) messages. The protocol uses randomness and is correct with high probability. It can tolerate any fraction of faulty processors which is strictly less than 1/6. Our result partially answers the following question posed by Kenneth Birman: {\textquotedblleft}How scalable are the traditional solutions to problems such as Consensus or Byzantine Agreement?{\textquotedblright} [5]},
  10783         www_section = {byzantine agreement},
  10784         journal = {unknown},
  10785         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sba.pdf},
  10786         author = {Lewis, Scott and Saia, Jared}
  10787 }
  10788 @conference {Goh04secureindexes,
  10789         title = {Secure Indexes},
  10790         booktitle = {In submission},
  10791         year = {2004},
  10792         url = {http://gnunet.org/papers/secureindex.pdf},
  10793         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/secureindex.pdf},
  10794         author = {Eu-jin Goh}
  10795 }
  10796 @conference {Conrad04SecureServiceSignaling,
  10797         title = {Secure Service Signaling and fast Authorization in Programmable Networks},
  10798         booktitle = {Proceedings of the 6th International Working Conference on Active Networking (IWAN) 2004},
  10799         year = {2004},
  10800         publisher = {Springer Berlin / Heidelberg},
  10801         organization = {Springer Berlin / Heidelberg},
  10802         type = {publication},
  10803         address = {Lawrence, Kansas},
  10804         abstract = {Programmable networks aim at the fast and flexible creation of services within a network. Often cited examples are audio and video transcoding, application layer multicast, or mobility and resilience support. In order to become commercially viable, programmable networks must provide authentication, authorization and accounting functionality. The mechanisms used to achieve these functionalities must be secure, reliable, and scalable, to be used in production scale programmable networks. Additionally, programmable nodes must resist various kinds of attacks, such as denial of service or replay attacks. Fraudulent use by individual users must also be prohibited.
  10805 This paper describes the design and implementation of a secure, reliable, and scalable signaling mechanism clients can use to initiate service startup and to manage services running on the nodes of a programmable network. This mechanism is designed for production scale networks with AAA-functionality},
  10806         www_section = {programmable networks, secrecy},
  10807         isbn = {978-3-540-71499-6},
  10808         doi = {10.1007/978-3-540-71500-9},
  10809         url = {http://i30www.ira.uka.de/research/publications/p2p/},
  10810         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/iwan2004.pdf},
  10811         author = {Michael Conrad and Thomas Fuhrmann and Marcus Schoeller and Martina Zitterbart}
  10812 }
  10813 @phdthesis {2004_14,
  10814         title = {Signaling and Networking in Unstructured Peer-to-Peer Networks},
  10815         year = {2004},
  10816         month = sep,
  10817         pages = {0--177},
  10818         school = {Technische Universit{\"a}t M{\"u}nchen},
  10819         type = {Dissertation},
  10820         address = {Munich, Germany},
  10821         abstract = {This work deals with the efficiency of Peer-to-Peer (P2P) networks, which are distributed and self-organizing overlay networks. We contribute to their understanding and design by using new measurement techniques, simulations and analytical methods. In this context we first present measurement methods and results of P2P networks concerning traffic and topology characteristics as well as concerning user behavior. Based on these results we develop stochastic models to describe the user behavior, the traffic and the topology of P2P networks analytically. Using the results of our measurements and analytical investigations, we develop new P2P architectures to improve the efficiency of P2P networks concerning their topology and their signaling traffic. Finally we verify our results for the new architectures by measurements as well as computer-based simulations on different levels of detail},
  10822         www_section = {application model, communication network, compression, content availability, cross layer communication, generating functions, overlay networks, random graph theory, self-organization, signaling traffic, simulation, topology measurement, traffic measurement, user model},
  10823         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Schollmeier\%20-\%20Signaling\%20and\%20networking\%20in\%20unstructured\%20p2p\%20networks.pdf},
  10824         author = {R{\"u}diger Schollmeier}
  10825 }
  10826 @conference {1007919,
  10827         title = {Simple efficient load balancing algorithms for peer-to-peer systems},
  10828         booktitle = {SPAA '04: Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures},
  10829         year = {2004},
  10830         pages = {36--43},
  10831         publisher = {ACM},
  10832         organization = {ACM},
  10833         address = {New York, NY, USA},
  10834         abstract = {Load balancing is a critical issue for the efficient operation of peer-to-peer networks. We give two new load-balancing protocols whose provable performance guarantees are within a constant factor of optimal. Our protocols refine the consistent hashing data structure that underlies the Chord (and Koorde) P2P network. Both preserve Chord's logarithmic query time and near-optimal data migration cost. Consistent hashing is an instance of the distributed hash table (DHT) paradigm for assigning items to nodes in a peer-to-peer system: items and nodes are mapped to a common address space, and nodes have to store all items residing close by in the address space. Our first protocol balances the distribution of the key address space to nodes, which yields a load-balanced system when the DHT maps items "randomly" into the address space. To our knowledge, this yields the first P2P scheme simultaneously achieving O(log n) degree, O(log n) look-up cost, and constant-factor load balance (previous schemes settled for any two of the three). Our second protocol aims to directly balance the distribution of items among the nodes. This is useful when the distribution of items in the address space cannot be randomized. We give a simple protocol that balances load by moving nodes to arbitrary locations "where they are needed." As an application, we use the last protocol to give an optimal implementation of a distributed data structure for range searches on ordered data},
  10835         www_section = {load balancing, P2P},
  10836         isbn = {1-58113-840-7},
  10837         doi = {10.1145/1007912.1007919},
  10838         url = {http://portal.acm.org/citation.cfm?id=1007919$\#$},
  10839         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2405.pdf},
  10840         author = {David Karger and Ruhl, Matthias}
  10841 }
  10842 @conference {Shnayder04simulatingthe,
  10843         title = {Simulating the power consumption of large-scale sensor network applications},
  10844         booktitle = {In Sensys},
  10845         year = {2004},
  10846         pages = {188--200},
  10847         publisher = {ACM Press},
  10848         organization = {ACM Press},
  10849         abstract = {Developing sensor network applications demands a new set of tools to aid programmers. A number of simulation environments have been developed that provide varying degrees of scalability, realism, and detail for understanding the behavior of sensor networks. To date, however, none of these tools have addressed one of the most important aspects of sensor application design: that of power consumption. While simple approximations of overall power usage can be derived from estimates of node duty cycle and communication rates, these techniques often fail to capture the detailed, low-level energy requirements of the CPU, radio, sensors, and other peripherals.
  10850 
  10851 In this paper, we present PowerTOSSIM, a scalable simulation environment for wireless sensor networks that provides an accurate, per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an event-driven simulation environment for TinyOS applications. In PowerTOSSIM, TinyOS components corresponding to specific hardware peripherals (such as the radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each device's activity during the simulation run. PowerTOSSIM employs a novel code-transformation technique to estimate the number of CPU cycles executed by each node, eliminating the need for expensive instruction-level simulation of sensor nodes. PowerTOSSIM includes a detailed model of hardware energy consumption based on the Mica2 sensor node platform. Through instrumentation of actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation of power consumption for a range of applications and scales to support very large simulations},
  10852         www_section = {sensor networks, TinyOS},
  10853         doi = {10.1145/1031495.1031518},
  10854         url = {http://portal.acm.org/citation.cfm?id=1031495.1031518},
  10855         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.9976.pdf},
  10856         author = {Victor Shnayder and Mark Hempstead and Bor-rong Chen and Geoff Werner Allen and Matt Welsh}
  10857 }
  10858 @conference {DanSer04,
  10859         title = {Statistical Disclosure or Intersection Attacks on Anonymity Systems},
  10860         booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)},
  10861         series = {LNCS},
  10862         year = {2004},
  10863         month = may,
  10864         publisher = {Springer Berlin / Heidelberg},
  10865         organization = {Springer Berlin / Heidelberg},
  10866         address = {Toronto},
  10867         abstract = {In this paper we look at the information an attacker can extract using a statistical disclosure attack. We provide analytical results about the anonymity of users when they repeatedly send messages through a threshold mix following the model of Kesdogan, Agrawal and Penz [7] and through a pool mix. We then present a statistical disclosure attack that can be used to attack models of anonymous communication networks based on pool mixes. Careful approximations make the attack computationally efficient. Such models are potentially better suited to derive results that could apply to the security of real anonymous communication networks},
  10868         www_section = {anonymity, statistical analysis},
  10869         isbn = {978-3-540-24207-9},
  10870         doi = {10.1007/b104759},
  10871         url = {http://www.springerlink.com/content/tqljb3hybk4rubla/},
  10872         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.2954.pdf},
  10873         author = {George Danezis and Andrei Serjantov}
  10874 }
  10875 @article {Androutsellis-Theotokis:2004:SPC:1041680.1041681,
  10876         title = {A survey of peer-to-peer content distribution technologies},
  10877         journal = {ACM Computing Surveys},
  10878         volume = {36},
  10879         year = {2004},
  10880         month = dec,
  10881         pages = {335--371},
  10882         publisher = {ACM},
  10883         address = {New York, NY, USA},
  10884         abstract = {Distributed computer architectures labeled "peer-to-peer" are designed for the sharing of computer resources (content, storage, CPU cycles) by direct exchange, rather than requiring the intermediation or support of a centralized server or authority. Peer-to-peer architectures are characterized by their ability to adapt to failures and accommodate transient populations of nodes while maintaining acceptable connectivity and performance. Content distribution is an important peer-to-peer application on the Internet that has received considerable research attention. Content distribution applications typically allow personal computers to function in a coordinated manner as a distributed storage medium by contributing, searching, and obtaining digital content. In this survey, we propose a framework for analyzing peer-to-peer content distribution technologies. Our approach focuses on nonfunctional characteristics such as security, scalability, performance, fairness, and resource management potential, and examines the way in which these characteristics are reflected in---and affected by---the architectural design decisions adopted by current peer-to-peer systems. We study current peer-to-peer systems and infrastructure technologies in terms of their distributed object location and routing mechanisms, their approach to content replication, caching and migration, their support for encryption, access control, authentication and identity, anonymity, deniability, accountability and reputation, and their use of resource trading and management schemes},
  10885         www_section = {content distribution, distributed hash table, DOLR, grid computing, P2P, peer-to-peer networking},
  10886         issn = {0360-0300},
  10887         doi = {http://doi.acm.org/10.1145/1041680.1041681},
  10888         url = {http://doi.acm.org/10.1145/1041680.1041681},
  10889         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20Computing\%20Surveys\%20-\%20A\%20survey\%20of\%20p2p\%20content\%20distribution\%20technologies.pdf},
  10890         author = {Androutsellis-Theotokis, Stephanos and Spinellis, Diomidis}
  10891 }
  10892 @conference {Tamilmani04swift:a,
  10893         title = {SWIFT: A System With Incentives For Trading},
  10894         booktitle = {P2PECON'04. Proceedings of the 2nd Workshop on Economics of Peer-to-Peer Systems},
  10895         year = {2004},
  10896         month = jun,
  10897         address = {Cambridge, Massachusetts, USA},
  10898         abstract = {In this paper, we present the design of a credit-based trading mechanism for peer-to-peer file sharing networks. We divide files into verifiable pieces; every peer interested in a file requests these pieces individually from the peers it is connected to. Our goal is to build a mechanism that supports fair large scale distribution in which downloads are fast, with low startup latency. We build a trading model in which peers use a pairwise currency to reconcile trading differences with each other and examine various trading strategies that peers can adopt. We show through analysis and simulation that peers who contribute to the network and take risks receive the most benefit in return. Our simulations demonstrate that peers who set high upload rates receive high download rates in return, but free-riders download very slowly compared to peers who upload. Finally, we propose a default trading strategy that is good for both the network as a whole and the peer employing it: deviating from that strategy yields little or no advantage for the peer},
  10899         www_section = {SWIFT, trading},
  10900         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PEcon\%2704\%20-\%20SWIFT.pdf},
  10901         author = {Karthik Tamilmani and Vinay Pai and Alexander E. Mohr}
  10902 }
  10903 @conference {sync-batching,
  10904         title = {Synchronous Batching: From Cascades to Free Routes},
  10905         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10906         series = {LNCS},
  10907         volume = {3424},
  10908         year = {2004},
  10909         month = may,
  10910         pages = {186--206},
  10911         abstract = {The variety of possible anonymity network topologies has spurred much debate in recent years. In a synchronous batching design, each batch of messages enters the mix network together, and the messages proceed in lockstep through the network. We show that a synchronous batching strategy can be used in various topologies, including a free-route network, in which senders choose paths freely, and a cascade network, in which senders choose from a set of fixed paths. We show that free-route topologies can provide better anonymity as well as better message reliability in the event of partial network failure},
  10912         www_section = {anonymity, network topology},
  10913         doi = {10.1007/b136164},
  10914         url = {http://www.springerlink.com/content/uqvfwe97ehlldm8d/},
  10915         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sync-batching.pdf},
  10916         author = {Roger Dingledine and Vitaly Shmatikov and Paul Syverson}
  10917 }
  10918 @conference {taxonomy-dummy,
  10919         title = {Taxonomy of Mixes and Dummy Traffic},
  10920         booktitle = {Proceedings of I-NetSec04: 3rd Working Conference on Privacy and Anonymity in Networked and Distributed Systems},
  10921         year = {2004},
  10922         month = aug,
  10923         address = {Toulouse, France},
  10924         abstract = {This paper presents an analysis of mixes and dummy traffic policies, which are building blocks of anonymous services. The goal of the paper is to bring together all the issues related to the analysis and design of mix networks. We discuss continuous and pool mixes, topologies for mix networks and dummy traffic policies. We point out the advantages and disadvantages of design decisions for mixes and dummy policies. Finally, we provide a list of research problems that need further work},
  10925         www_section = {anonymity, dummy traffic, mix},
  10926         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.9855},
  10927         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.9855.pdf},
  10928         author = {Claudia Diaz and Bart Preneel}
  10929 }
  10930 @conference {timing-fc2004,
  10931         title = {Timing Attacks in Low-Latency Mix-Based Systems},
  10932         booktitle = {Proceedings of Financial Cryptography (FC '04)},
  10933         year = {2004},
  10934         month = feb,
  10935         pages = {251--265},
  10936         publisher = {Springer-Verlag, LNCS 3110},
  10937         organization = {Springer-Verlag, LNCS 3110},
  10938         abstract = {A mix is a communication proxy that attempts to hide the correspondence between its incoming and outgoing messages. Timing attacks are a significant challenge for mix-based systems that wish to support interactive, low-latency applications. However, the potency of these attacks has not been studied carefully. In this paper, we investigate timing analysis attacks on low-latency mix systems and clarify the threat they pose. We propose a novel technique, defensive dropping, to thwart timing attacks. Through simulations and analysis, we show that defensive dropping can be effective against attackers who employ timing analysis},
  10939         isbn = {978-3-540-22420-4},
  10940         doi = {10.1007/b98935},
  10941         url = {http://www.springerlink.com/content/n4khdtwk7dqvj0u0/},
  10942         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/timing-fc2004.pdf},
  10943         author = {Brian Neil Levine and Michael K. Reiter and Chenxi Wang and Matthew Wright},
  10944         editor = {Ari Juels}
  10945 }
  10946 @conference {tor-design,
  10947         title = {Tor: The Second-Generation Onion Router},
  10948         booktitle = {Proceedings of the 13th USENIX Security Symposium},
  10949         year = {2004},
  10950         month = aug,
  10951         publisher = {USENIX Association, Berkeley, CA, USA},
  10952         organization = {USENIX Association, Berkeley, CA, USA},
  10953         abstract = {We present Tor, a circuit-based low-latency anonymous communication service. This second-generation Onion Routing system addresses limitations in the original design by adding perfect forward secrecy, congestion control, directory servers, integrity checking, configurable exit policies, and a practical design for location-hidden services via rendezvous points. Tor works on the real-world Internet, requires no special privileges or kernel modifications, requires little synchronization or coordination between nodes, and provides a reasonable tradeoff between anonymity, usability, and efficiency. We briefly describe our experiences with an international network of more than 30 nodes. We close with a list of open problems in anonymous communication},
  10954         www_section = {onion routing},
  10955         url = {http://portal.acm.org/citation.cfm?id=1251396},
  10956         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tor-design.pdf},
  10957         author = {Roger Dingledine and Nick Mathewson and Paul Syverson}
  10958 }
  10959 @conference {Kiran04totalrecall:,
  10960         title = {Total Recall: System Support for Automated Availability Management},
  10961         booktitle = {In NSDI},
  10962         year = {2004},
  10963         pages = {337--350},
  10964         abstract = {Availability is a storage system property that is both highly desired and yet minimally engineered. While many systems provide mechanisms to improve availability--such as redundancy and failure recovery--how to best configure these mechanisms is typically left to the system manager. Unfortunately, few individuals have the skills to properly manage the trade-offs involved, let alone the time to adapt these decisions to changing conditions. Instead, most systems are configured statically and with only a cursory understanding of how the configuration will impact overall performance or availability. While this issue can be problematic even for individual storage arrays, it becomes increasingly important as systems are distributed--and absolutely critical for the wide-area peer-to-peer storage infrastructures being explored.
  10965 This paper describes the motivation, architecture and implementation for a new peer-to-peer storage system, called TotalRecall, that automates the task of availability management. In particular, the TotalRecall system automatically measures and estimates the availability of its constituent host components, predicts their future availability based on past behavior, calculates the appropriate redundancy mechanisms and repair policies, and delivers user-specified availability while maximizing efficiency},
  10966         www_section = {P2P},
  10967         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.9775},
  10968         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/recall.pdf},
  10969         author = {Ranjita Bhagwan Kiran and Kiran Tati and Yu-chung Cheng and Stefan Savage and Geoffrey M. Voelker}
  10970 }
  10971 @conference {danezis:pet2004,
  10972         title = {The Traffic Analysis of Continuous-Time Mixes},
  10973         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  10974         series = {LNCS},
  10975         volume = {3424},
  10976         year = {2004},
  10977         month = may,
  10978         pages = {35--50},
  10979         publisher = {Springer Berlin / Heidelberg},
  10980         organization = {Springer Berlin / Heidelberg},
  10981         abstract = {We apply the information-theoretic anonymity metrics to continuous-time mixes, which individually delay messages instead of batching them. The anonymity of such mixes is measured based on their delay characteristics, and as an example the exponential mix (sg-mix) is analysed, simulated and shown to use the optimal strategy. We also describe a practical and powerful traffic analysis attack against connection-based continuous-time mix networks, despite the presence of some cover traffic. Assuming a passive observer, the conditions are calculated that make tracing messages through the network possible},
  10982         www_section = {anonymity, traffic analysis},
  10983         isbn = {978-3-540-26203-9},
  10984         doi = {10.1007/b136164},
  10985         url = {http://www.springerlink.com/content/kgenxdaxkyey4ed2/},
  10986         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2004.pdf},
  10987         author = {George Danezis}
  10988 }
  10989 @book {Jiang_trustand,
  10990         title = {Trust and Cooperation in Peer-to-Peer Systems},
  10991         series = {Lecture Notes in Computer Science},
  10992         volume = {3032},
  10993         year = {2004},
  10994         pages = {371--378},
  10995         publisher = {Springer Berlin / Heidelberg},
  10996         organization = {Springer Berlin / Heidelberg},
  10997         abstract = {Most of the past studies on peer-to-peer systems have emphasized routing and lookup. The selfishness of users, which brings on the free riding problem, has not attracted sufficient attention from researchers. In this paper, we introduce a decentralized reputation-based trust model first, in which trust relationships could be built based on the reputation of peers. Subsequently, we use the iterated prisoner's dilemma to model the interactions in peer-to-peer systems and propose a simple incentive mechanism. By simulations, it is shown that stable cooperation can emerge after limited rounds of interaction between peers by using the incentive mechanism},
  10998         www_section = {cooperation, incentives, iterated prisoner's dilemma, peer-to-peer networking},
  10999         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jiang\%2C\%20Bai\%20\%26\%20Wang\%20-\%20Trust\%20and\%20Cooperation\%20in\%20Peer-to-Peer\%20Systems.pdf},
  11000         author = {Junjie Jiang and Haihuan Bai and Weinong Wang}
  11001 }
  11002 @conference {GolleJakobssonJuelsSyverson:universal04,
  11003         title = {Universal Re-Encryption for Mixnets},
  11004         booktitle = {Proceedings of the 2004 RSA Conference, Cryptographer's track},
  11005         year = {2004},
  11006         month = feb,
  11007         publisher = {Springer Berlin / Heidelberg},
  11008         organization = {Springer Berlin / Heidelberg},
  11009         address = {San Francisco, USA},
  11010         abstract = {We introduce a new cryptographic technique that we call universal re-encryption. A conventional cryptosystem that permits re-encryption, such as ElGamal, does so only for a player with knowledge of the public key corresponding to a given ciphertext. In contrast, universal re-encryption can be done without knowledge of public keys. We propose an asymmetric cryptosystem with universal re-encryption that is half as efficient as standard ElGamal in terms of computation and storage.
  11011 While technically and conceptually simple, universal re-encryption leads to new types of functionality in mixnet architectures. Conventional mixnets are often called upon to enable players to communicate with one another through channels that are externally anonymous, i.e., that hide information permitting traffic-analysis. Universal re-encryption lets us construct a mixnet of this kind in which servers hold no public or private keying material, and may therefore dispense with the cumbersome requirements of key generation, key distribution, and private-key management. We describe two practical mixnet constructions, one involving asymmetric input ciphertexts, and another with hybrid-ciphertext inputs},
  11012         www_section = {anonymity, private channels, universal re-encryption},
  11013         isbn = {978-3-540-20996-6},
  11014         doi = {10.1007/b95630},
  11015         url = {http://www.springerlink.com/content/1fu5qrb1a2kfe7f9/},
  11016         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GolleJakobssonJuelsSyverson-universal04.pdf},
  11017         author = {Philippe Golle and Jakobsson, Markus and Ari Juels and Paul Syverson}
  11018 }
  11019 @conference {warta04-Klonowski,
  11020         title = {Universal Re-encryption of Signatures and Controlling Anonymous Information Flow},
  11021         booktitle = {Proceedings of WARTACRYPT '04},
  11022         year = {2004},
  11023         month = jul,
  11024         abstract = {Anonymous communication protocols, essential for preserving the privacy of the communicating parties, may lead to severe problems. A malicious server may use anonymous communication protocols for injecting unwelcome messages into the system so that their source can hardly be traced. So anonymity and privacy protection on the one side and protection against phenomena such as spam are so far contradictory goals. We propose a mechanism that may be used to limit the mentioned side effects of privacy protection. In the proposed protocol, each encrypted message admitted into the system is signed by a respective authority. Then, on its route through the network, the encrypted message and the signature are re-encrypted universally. The purpose of universal re-encryption is to hide the routes of the messages from an observer monitoring the traffic. Despite re-encryption, the signature of the authority remains valid. Depending on the particular application, verification of the signature is possible either off-line by anybody with access to the ciphertext and the signature, or requires contact with the authority that issued the signature},
  11025         www_section = {anonymity, information hiding, privacy, re-encryption},
  11026         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.108.4976},
  11027         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.4976.pdf},
  11028         author = {Marek Klonowski and Miroslaw Kutylowski and Anna Lauks and Filip Zagorski}
  11029 }
  11030 @article {Dabek:2004:VDN:1030194.1015471,
  11031         title = {Vivaldi: a decentralized network coordinate system},
  11032         journal = {SIGCOMM Computer Communication Review},
  11033         volume = {34},
  11034         year = {2004},
  11035         month = oct,
  11036         pages = {15--26},
  11037         publisher = {ACM},
  11038         address = {New York, NY, USA},
  11039         abstract = {Large-scale Internet applications can benefit from an ability to predict round-trip times to other hosts without having to contact them first. Explicit measurements are often unattractive because the cost of measurement can outweigh the benefits of exploiting proximity information. Vivaldi is a simple, light-weight algorithm that assigns synthetic coordinates to hosts such that the distance between the coordinates of two hosts accurately predicts the communication latency between the hosts. Vivaldi is fully distributed, requiring no fixed network infrastructure and no distinguished hosts. It is also efficient: a new host can compute good coordinates for itself after collecting latency information from only a few other hosts. Because it requires little communication, Vivaldi can piggy-back on the communication patterns of the application using it and scale to a large number of hosts. An evaluation of Vivaldi using a simulated network whose latencies are based on measurements among 1740 Internet hosts shows that a 2-dimensional Euclidean model with height vectors embeds these hosts with low error (the median relative error in round-trip time prediction is 11 percent)},
  11040         www_section = {internet topology, network coordinates, Vivaldi},
  11041         issn = {0146-4833},
  11042         doi = {http://doi.acm.org/10.1145/1030194.1015471},
  11043         url = {http://doi.acm.org/10.1145/1030194.1015471},
  11044         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Compt.\%20Commun.\%20Rev.\%20-\%20Vivaldi.pdf},
  11045         author = {Dabek, Frank and Russ Cox and Frans M. Kaashoek and Robert Morris}
  11046 }
  11047 @conference {1038318,
  11048         title = {Vulnerabilities and Security Threats in Structured Overlay Networks: A Quantitative Analysis},
  11049         booktitle = {ACSAC '04: Proceedings of the 20th Annual Computer Security Applications Conference},
  11050         year = {2004},
  11051         pages = {252--261},
  11052         publisher = {IEEE Computer Society},
  11053         organization = {IEEE Computer Society},
  11054         address = {Washington, DC, USA},
  11055         abstract = {A number of recent applications have been built on distributed hash table (DHT) based overlay networks. Almost all DHT-based schemes employ a tight deterministic data placement and ID mapping scheme. This feature on one hand provides assurance on the location of data if it exists, within a bounded number of hops, and on the other hand opens doors for malicious nodes to lodge attacks that can potentially thwart the functionality of the overlay network. This paper studies several serious security threats in DHT-based systems through two targeted attacks at the overlay network's protocol layer. The first attack explores the routing anomalies that can be caused by malicious nodes returning incorrect lookup routes. The second attack targets the ID mapping scheme. We disclose that the malicious nodes can target any specific data item in the system, and corrupt/modify the data item in its favor. For each of these attacks, we provide quantitative analysis to estimate the extent of damage that can be caused by the attack, followed by experimental validation and defenses to guard the overlay networks from such attacks},
  11056         www_section = {distributed hash table, overlay networks, P2P},
  11057         isbn = {0-7695-2252-1},
  11058         doi = {10.1109/CSAC.2004.50},
  11059         url = {http://portal.acm.org/citation.cfm?id=1038254.1038318$\#$},
  11060         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.1198.pdf},
  11061         author = {Srivatsa, Mudhakar and Liu, Ling}
  11062 }
  11063 @conference {Bustamante04wayback:a,
  11064         title = {Wayback: A User-level Versioning File System for Linux},
  11065         booktitle = {In Proceedings of USENIX 2004 (Freenix Track)},
  11066         year = {2004},
  11067         abstract = {In a typical file system, only the current version of a file (or directory) is available. In Wayback, a user can also access any previous version, all the way back to the file's creation time. Versioning is done automatically at the write level: each write to the file creates a new version. Wayback implements versioning using an undo log structure, exploiting the massive space available on modern disks to provide its very useful functionality. Wayback is a user-level file system built on the FUSE framework that relies on an underlying file system for access to the disk. In addition to simplifying Wayback, this also allows it to extend any existing file system with versioning: after being mounted, the file system can be mounted a second time with versioning. We describe the implementation of Wayback, and evaluate its performance using several benchmarks},
  11068         www_section = {file systems, version control},
  11069         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.2672},
  11070         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.11.2672.pdf},
  11071         author = {Brian Cornell and Peter Dinda and Fabian Bustamante}
  11072 }
  11073 @conference {Andrade04whencan,
  11074         title = {When Can an Autonomous Reputation Scheme Discourage Free-riding in a Peer-to-Peer System?},
  11075         booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on Cluster Computing and the Grid, IEEE Computer Society},
  11076         year = {2004},
  11077         pages = {440--448},
  11078         abstract = {We investigate the circumstances under which it is possible to discourage free-riding in a peer-to-peer system for resource-sharing by prioritizing resource allocation to peers with higher reputation. We use a model to predict conditions necessary for any reputation scheme to succeed in discouraging free-riding by this method. We show with simulations that for representative cases, a very simple autonomous reputation scheme works nearly as well at discouraging free-riding as an ideal reputation scheme. Finally, we investigate the expected dynamic behavior of the system},
  11079         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.9659\&rep=rep1\&type=pdf},
  11080         author = {Nazareno Andrade and Miranda Mowbray and Walfredo Cirne and Francisco Brasileiro}
  11081 }
  11082 @conference {fu-active,
  11083         title = {Active Traffic Analysis Attacks and Countermeasures},
  11084         booktitle = {Proceedings of the 2003 International Conference on Computer Networks and Mobile Computing},
  11085         year = {2003},
  11086         month = jan,
  11087         pages = {31--39},
  11088         publisher = {IEEE Computer Society, Washington, DC, USA},
  11089         organization = {IEEE Computer Society, Washington, DC, USA},
  11090         abstract = {To explore mission-critical information, an adversary using active traffic analysis attacks injects probing traffic into the victim network and analyzes the status of underlying payload traffic. Active traffic analysis attacks are easy to deploy and hence become a serious threat to mission critical applications. This paper suggests statistical pattern recognition as a fundamental technology to evaluate effectiveness of active traffic analysis attacks and corresponding countermeasures. Our evaluation shows that sample entropy of ping packets' round trip time is an effective feature statistic to discover the payload traffic rate. We propose simple countermeasures that can significantly reduce the effectiveness of ping-based active traffic analysis attacks. Our experiments validate the effectiveness of this scheme, which can also be used in other scenarios},
  11091         www_section = {traffic analysis},
  11092         isbn = {0-7695-2033-2},
  11093         url = {http://portal.acm.org/citation.cfm?id=950964},
  11094         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fu-active.pdf},
  11095         author = {Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao}
  11096 }
  11097 @conference {939011,
  11098         title = {Ad hoc-VCG: a truthful and cost-efficient routing protocol for mobile ad hoc networks with selfish agents},
  11099         booktitle = {MobiCom '03: Proceedings of the 9th annual international conference on Mobile computing and networking},
  11100         year = {2003},
  11101         pages = {245--259},
  11102         publisher = {ACM},
  11103         organization = {ACM},
  11104         address = {New York, NY, USA},
  11105         abstract = {We introduce a game-theoretic setting for routing in a mobile ad hoc network that consists of greedy, selfish agents who accept payments for forwarding data for other agents if the payments cover their individual costs incurred by forwarding data. In this setting, we propose Ad hoc-VCG, a reactive routing protocol that achieves the design objectives of truthfulness (i.e., it is in the agents' best interest to reveal their true costs for forwarding data) and cost-efficiency (i.e., it guarantees that routing is done along the most cost-efficient path) in a game-theoretic sense by paying to the intermediate nodes a premium over their actual costs for forwarding data packets. We show that the total overpayment (i.e., the sum of all premiums paid) is relatively small by giving a theoretical upper bound and by providing experimental evidence. Our routing protocol implements a variation of the well-known mechanism by Vickrey, Clarke, and Groves in a mobile network setting. Finally, we analyze a very natural routing protocol that is an adaptation of the Packet Purse Model [8] with auctions in our setting and show that, unfortunately, it does not achieve cost-efficiency or truthfulness},
  11106         www_section = {ad-hoc networks, energy efficiency, game theory, mechanism design, routing, selfish agents, VCG mechanism},
  11107         isbn = {1-58113-753-2},
  11108         doi = {10.1145/938985.939011},
  11109         url = {http://portal.acm.org/citation.cfm?id=939011$\#$},
  11110         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.7483.pdf},
  11111         author = {Anderegg, Luzi and Eidenbenz, Stephan}
  11112 }
  11113 @conference {1251057,
  11114         title = {An analysis of compare-by-hash},
  11115         booktitle = {HOTOS'03: Proceedings of the 9th conference on Hot Topics in Operating Systems},
  11116         year = {2003},
  11117         pages = {3--3},
  11118         publisher = {USENIX Association},
  11119         organization = {USENIX Association},
  11120         address = {Berkeley, CA, USA},
  11121         abstract = {Recent research has produced a new and perhaps dangerous technique for uniquely identifying blocks that I will call compare-by-hash. Using this technique, we decide whether two blocks are identical to each other by comparing their hash values, using a collision-resistant hash such as SHA-1 [5]. If the hash values match, we assume the blocks are identical without further ado. Users of compare-by-hash argue that this assumption is warranted because the chance of a hash collision between any two randomly generated blocks is estimated to be many orders of magnitude smaller than the chance of many kinds of hardware errors. Further analysis shows that this approach is not as risk-free as it seems at first glance},
  11122         url = {http://portal.acm.org/citation.cfm?id=1251057$\#$},
  11123         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.100.8338.pdf},
  11124         author = {Henson, Val}
  11125 }
  11126 @conference {Kuegler03ananalysis,
  11127         title = {An Analysis of GNUnet and the Implications for Anonymous, Censorship-Resistant Networks},
  11128         booktitle = {Proceedings of the 3rd International Workshop on Privacy Enhancing Technologies (PET 2003)},
  11129         year = {2003},
  11130         month = jan,
  11131         pages = {161--176},
  11132         publisher = {Springer-Verlag},
  11133         organization = {Springer-Verlag},
  11134         www_section = {anonymity, GNUnet},
  11135         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GNUnet_pet.pdf},
  11136         author = {Dennis K{\"u}gler}
  11137 }
  11138 @conference {fu-analytical,
  11139         title = {Analytical and Empirical Analysis of Countermeasures to Traffic Analysis Attacks},
  11140         booktitle = {Proceedings of the 2003 International Conference on Parallel Processing},
  11141         year = {2003},
  11142         pages = {483--492},
  11143         abstract = {This paper studies countermeasures to traffic analysis attacks. A common strategy for such countermeasures is link padding. We consider systems where payload traffic is padded so that packets have either constant inter-arrival times or variable inter-arrival times. The adversary applies statistical recognition techniques to detect the payload traffic rates by using statistical measures like sample mean, sample variance, or sample entropy. We evaluate quantitatively the ability of the adversary to make a correct detection and derive closed-form formulas for the detection rate based on analytical models. Extensive experiments were carried out to validate the system performance predicted by the analytical method. Based on the systematic evaluations, we develop design guidelines for the proper configuration of a system in order to minimize the detection rate},
  11144         www_section = {traffic analysis},
  11145         isbn = {0-7695-2017-0},
  11146         doi = {10.1109/ICPP.2003.1240613},
  11147         url = {http://www.computer.org/portal/web/csdl/doi?doc=doi/10.1109/ICPP.2003.1240613},
  11148         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fu-analytical.pdf},
  11149         author = {Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao}
  11150 }
  11151 @conference {SN03,
  11152         title = {On the Anonymity of Timed Pool Mixes},
  11153         booktitle = {Proceedings of the Workshop on Privacy and Anonymity Issues in Networked and Distributed Systems},
  11154         year = {2003},
  11155         month = may,
  11156         pages = {427--434},
  11157         publisher = {Kluwer},
  11158         organization = {Kluwer},
  11159         address = {Athens, Greece},
  11160         abstract = {This paper presents a method for calculating the anonymity of a timed pool mix. Thus we are able to compare it to a threshold pool mix, and any future mixes that might be developed. Although we are only able to compute the anonymity of a timed pool mix after some specific number of rounds, this is a practical approximation to the real anonymity},
  11161         www_section = {anonymity, mix},
  11162         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.5699},
  11163         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.9.5699.pdf},
  11164         author = {Andrei Serjantov and Richard E. Newman}
  11165 }
  11166 @conference {Hildrum03asymptoticallyefficient,
  11167         title = {Asymptotically Efficient Approaches to Fault-Tolerance in Peer-to-Peer},
  11168         booktitle = {In Proc. of DISC},
  11169         year = {2003},
  11170         pages = {321--336},
  11171         abstract = {In this paper, we show that two peer-to-peer systems, Pastry [13] and Tapestry [17], can be made tolerant to certain classes of failures and a limited class of attacks. These systems are said to operate properly if they can find the closest node matching a requested ID. The system must also be able to dynamically construct the necessary routing information when new nodes enter or the network changes. We show that with an additional factor of storage overhead and communication overhead, they can continue to achieve both of these goals in the presence of a constant fraction of nodes that do not obey the protocol. Our techniques are similar in spirit to those of Saia et al. [14] and Naor and Wieder [10]. Some simple simulations show that these techniques are useful even with constant overhead},
  11172         www_section = {fault-tolerance, P2P},
  11173         isbn = {978-3-540-20184-7},
  11174         doi = {10.1007/b13831},
  11175         url = {http://www.springerlink.com/content/7emt7u01cvbb6bu6/},
  11176         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.334.pdf},
  11177         author = {Hildrum, Kirsten and John Kubiatowicz}
  11178 }
  11179 @booklet {Hurler_automaticcontext,
  11180         title = {Automatic Context Integration for Group Aware Environments},
  11181         year = {2003},
  11182         abstract = {Tele-collaboration is a valuable tool that can connect learners at different sites and help them benefit from their respective competences. Although many e-learning applications provide a high level of technical sophistication, such tools typically fall short of reflecting the learners' full context, e.g., their presence and awareness. Hence, these applications cause many disturbances in the social interaction of the learners. This paper describes mechanisms to improve the group awareness in e-learning environments with the help of automatic integration of such context information from the physical world. This information is gathered by different embedded sensors in various objects, e.g., a coffee mug or an office chair. This paper also describes first results of the integration of these sensors into an existing CSCW/CSCL framework},
  11183         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1450},
  11184         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hurler03context.pdf},
  11185         author = {Bernhard Hurler and Leo Petrak and Thomas Fuhrmann and Oliver Brand and Martina Zitterbart}
  11186 }
  11187 @conference {2003_0,
  11188         title = {Bootstrapping a Distributed Computational Economy with Peer-to-Peer Bartering},
  11189         booktitle = {Proceedings of the 1st Workshop on Economics of Peer-to-Peer Systems},
  11190         year = {2003},
  11191         month = jun,
  11192         address = {Berkeley, CA, USA},
  11193         www_section = {bartering, distributed computational economies, peer-to-peer bartering, resource discovery, resource exchange, resource peering},
  11194         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Workshop\%20on\%20Economics\%20of\%20P2P\%20Systems\%2703\%20-\%20Chun\%2C\%20Fu\%20\%26\%20Vahdat.pdf},
  11195         author = {Chun, Brent and Yun Fu and Vahdat, Amin}
  11196 }
  11197 @conference {nguyen:pet2003,
  11198         title = {Breaking and Mending Resilient Mix-nets},
  11199         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11200         year = {2003},
  11201         month = mar,
  11202         pages = {66--80},
  11203         publisher = {Springer-Verlag, LNCS 2760},
  11204         organization = {Springer-Verlag, LNCS 2760},
  11205         abstract = {In this paper we show two attacks against universally resilient mix-nets. The first attack can be used against a number of mix-nets, including Furukawa-Sako01 [6], Millimix [11], Abe98 [1], MiP-1, MiP-2 [2,3] and Neff01 [19]. We give the details of the attack in the case of Furukawa-Sako01 mix-net. The second attack breaks the correctness of Millimix [11]. We show how to counter these attacks, and give efficiency and security analysis for the proposed countermeasures},
  11206         www_section = {attack, security analysis},
  11207         isbn = {978-3-540-20610-1},
  11208         doi = {10.1007/b94512},
  11209         url = {http://www.springerlink.com/content/0e0mwvgyt008wxkf/},
  11210         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nguyen-pet2003.pdf},
  11211         author = {Lan Nguyen and Rei Safavi-Naini},
  11212         editor = {Roger Dingledine}
  11213 }
  11214 @article {Prabhakar01buildinglow-diameter,
  11215         title = {Building Low-Diameter P2P Networks},
  11216         journal = {IEEE Journal on Selected Areas in Communications},
  11217         volume = {21},
  11218         year = {2003},
  11219         month = aug,
  11220         pages = {995--1002},
  11221         abstract = {A scheme to build dynamic, distributed P2P networks of constant degree and logarithmic diameter},
  11222         url = {http://www.cs.brown.edu/people/eli/papers/focs01.pdf},
  11223         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/focs01.pdf},
  11224         author = {Gopal Pandurangan and Prabhakar Raghavan and Eli Upfal}
  11225 }
  11226 @conference {Kostic:2003:BHB:945445.945473,
  11227         title = {Bullet: High Bandwidth Data Dissemination Using an Overlay Mesh},
  11228         booktitle = {SOSP'03. Proceedings of the 19th ACM Symposium on Operating Systems Principles},
  11229         series = {SOSP '03},
  11230         year = {2003},
  11231         month = oct,
  11232         pages = {282--297},
  11233         publisher = {ACM},
  11234         organization = {ACM},
  11235         address = {Bolton Landing, NY, USA},
  11236         abstract = {In recent years, overlay networks have become an effective alternative to IP multicast for efficient point to multipoint communication across the Internet. Typically, nodes self-organize with the goal of forming an efficient overlay tree, one that meets performance targets without placing undue burden on the underlying network. In this paper, we target high-bandwidth data distribution from a single source to a large number of receivers. Applications include large-file transfers and real-time multimedia streaming. For these applications, we argue that an overlay mesh, rather than a tree, can deliver fundamentally higher bandwidth and reliability relative to typical tree structures. This paper presents Bullet, a scalable and distributed algorithm that enables nodes spread across the Internet to self-organize into a high bandwidth overlay mesh. We construct Bullet around the insight that data should be distributed in a disjoint manner to strategic points in the network. Individual Bullet receivers are then responsible for locating and retrieving the data from multiple points in parallel. Key contributions of this work include: i) an algorithm that sends data to different points in the overlay such that any data object is equally likely to appear at any node, ii) a scalable and decentralized algorithm that allows nodes to locate and recover missing data items, and iii) a complete implementation and evaluation of Bullet running across the Internet and in a large-scale emulation environment that reveals up to a factor of two bandwidth improvement under a variety of circumstances. In addition, we find that, relative to tree-based solutions, Bullet reduces the need to perform expensive bandwidth probing. In a tree, it is critical that a node's parent delivers a high rate of application data to each child. In Bullet, however, nodes simultaneously receive data from multiple sources in parallel, making it less important to locate any single source capable of sustaining a high transmission rate},
  11237         www_section = {BANDWIDTH, bullet, overlays, peer-to-peer networking},
  11238         isbn = {1-58113-757-5},
  11239         doi = {http://doi.acm.org/10.1145/945445.945473},
  11240         url = {http://doi.acm.org/10.1145/945445.945473},
  11241         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SOSP\%2703\%20-\%20Bullet.pdf},
  11242         author = {Kosti{\'c}, Dejan and Rodriguez, Adolfo and Albrecht, Jeannie and Vahdat, Amin}
  11243 }
  11244 @article {buses03,
  11245         title = {Buses for Anonymous Message Delivery},
  11246         journal = {Journal of Cryptology},
  11247         volume = {16},
  11248         number = {1},
  11249         year = {2003},
  11250         pages = {25--39},
  11251         abstract = {This work develops a novel approach to hide the senders and the receivers of messages. The intuition is taken from an everyday activity that hides the {\textquoteleft}{\textquoteleft}communication pattern''{\textemdash}the public transportation system. To describe our protocols, buses are used as a metaphor: Buses, i.e., messages, are traveling on the network, each piece of information is allocated a seat within the bus. Routes are chosen and buses are scheduled to traverse these routes. Deterministic and randomized protocols are presented, the protocols differ in the number of buses in the system, the worst case traveling time, and the required buffer size in a {\textquoteleft}{\textquoteleft}station.'' In particular, a protocol that is based on cluster partition of the network is presented; in this protocol there is one bus traversing each cluster. The clusters' size in the partition gives time and communication tradeoffs. One advantage of our protocols over previous works is that they are not based on statistical properties for the communication pattern. Another advantage is that they only require the processors in the communication network to be busy periodically},
  11252         www_section = {privacy, traffic analysis},
  11253         issn = {0933-2790},
  11254         doi = {10.1007/s00145-002-0128-6},
  11255         url = {http://www.springerlink.com/content/eljjgl3ec01c00xa/},
  11256         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.80.1566.pdf},
  11257         author = {Amos Beimel and Shlomi Dolev}
  11258 }
  11259 @conference {778418,
  11260         title = {A charging and rewarding scheme for packet forwarding in multi-hop cellular networks},
  11261         booktitle = {MobiHoc '03: Proceedings of the 4th ACM international symposium on Mobile ad hoc networking \& computing},
  11262         year = {2003},
  11263         pages = {13--24},
  11264         publisher = {ACM},
  11265         organization = {ACM},
  11266         address = {New York, NY, USA},
  11267         abstract = {In multi-hop cellular networks, data packets have to be relayed hop by hop from a given mobile station to a base station and vice-versa. This means that the mobile stations must accept to forward information for the benefit of other stations. In this paper, we propose an incentive mechanism that is based on a charging/rewarding scheme and that makes collaboration rational for selfish nodes. We base our solution on symmetric cryptography to cope with the limited resources of the mobile stations. We provide a set of protocols and study their robustness with respect to various attacks. By leveraging on the relative stability of the routes, our solution leads to a very moderate overhead},
  11268         www_section = {ad-hoc networks, charging, cooperation, hybrid cellular networks, multi-hop networks, packet forwarding},
  11269         isbn = {1-58113-684-6},
  11270         doi = {10.1145/778415.778418},
  11271         url = {http://portal.acm.org/citation.cfm?id=778418$\#$},
  11272         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BenSalemBHJ03mobihoc.pdf},
  11273         author = {Salem, Naouel Ben and Levente Butty{\'a}n and Jean-Pierre Hubaux and Jakobsson, Markus}
  11274 }
  11275 @conference {kutzner03connecting,
  11276         title = {Connecting Vehicle Scatternets by Internet-Connected Gateways},
  11277         booktitle = {Workshop on Multiradio Multimedia Communications MMC 2003},
  11278         year = {2003},
  11279         type = {publication},
  11280         address = {University of Dortmund, Germany},
  11281         abstract = {This paper presents an approach for interconnecting isolated clouds of an ad hoc network that form a scatternet topology using Internet gateways as intermediate nodes. The architecture developed is intended to augment FleetNet, a highly dynamic ad hoc network for inter-vehicle communications. This is achieved by upgrading FleetNet capabilities to establish a communication path between moving vehicles and the Internet via Internet gateways to facilitate direct gateway to gateway communications via the Internet, thus bridging gaps in the network topology and relaying packets closer towards their geographical destination at the same time. After outlining the overall FleetNet approach and its underlying geographical multi-hop routing, we focus on the FleetNet gateway architecture. We describe required modifications to the gateway architecture and to the FleetNet network layer in order to use these gateways as intermediate nodes for FleetNet routing. Finally, we conclude the paper by a short discussion on the prototype gateway implementation and by summarizing first results and ongoing work on inter scatternet communication},
  11282         url = {http://i30www.ira.uka.de/research/publications/p2p/},
  11283         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner03connecting.pdf},
  11284         author = {Kendy Kutzner and Jean-Jacques Tchouto and Marc Bechler and Lars Wolf and Bernd Bochow and Thomas Luckenbach}
  11285 }
  11286 @conference {1247343,
  11287         title = {A cooperative internet backup scheme},
  11288         booktitle = {ATEC '03: Proceedings of the annual conference on USENIX Annual Technical Conference},
  11289         year = {2003},
  11290         pages = {3--3},
  11291         publisher = {USENIX Association},
  11292         organization = {USENIX Association},
  11293         address = {Berkeley, CA, USA},
  11294         abstract = {We present a novel peer-to-peer backup technique that allows computers connected to the Internet to back up their data cooperatively: Each computer has a set of partner computers, which collectively hold its backup data. In return, it holds a part of each partner's backup data. By adding redundancy and distributing the backup data across many partners, a highly-reliable backup can be obtained in spite of the low reliability of the average Internet machine.
  11295 
  11296 Because our scheme requires cooperation, it is potentially vulnerable to several novel attacks involving free riding (e.g., holding a partner's data is costly, which tempts cheating) or disruption. We defend against these attacks using a number of new methods, including the use of periodic random challenges to ensure partners continue to hold data and the use of disk-space wasting to make cheating unprofitable. Results from an initial prototype show that our technique is feasible and very inexpensive: it appears to be one to two orders of magnitude cheaper than existing Internet backup services},
  11297         www_section = {backup, P2P, redundancy},
  11298         url = {http://portal.acm.org/citation.cfm?id=1247343$\#$},
  11299         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lillibridge.pdf},
  11300         author = {Mark Lillibridge and Sameh Elnikety and Andrew D. Birrell and Mike Burrows and Isard, Michael}
  11301 }
  11302 @conference {Leibowitz:2003:DKN:832311.837393,
  11303         title = {Deconstructing the Kazaa Network},
  11304         booktitle = {WIAPP'03--Proceedings of the Third IEEE Workshop on Internet Applications},
  11305         series = {WIAPP '03},
  11306         year = {2003},
  11307         month = jun,
  11308         pages = {0--112},
  11309         publisher = {IEEE Computer Society},
  11310         organization = {IEEE Computer Society},
  11311         address = {San Jos{\'e}, CA, USA},
  11312         abstract = {Internet traffic is experiencing a shift from web traffic to file swapping traffic. Today a significant part of Internet traffic is generated by peer-to-peer applications, mostly by the popular Kazaa application. Yet, to date, few studies analyze Kazaa traffic, thus leaving the bulk of Internet traffic in the dark. We present a large-scale investigation of Kazaa traffic based on logs collected at a large Israeli ISP, which capture roughly a quarter of all traffic between Israel and the US},
  11313         www_section = {file swapping traffic, kazaa, traffic},
  11314         isbn = {0-7695-1972-5},
  11315         url = {http://dl.acm.org/citation.cfm?id=832311.837393},
  11316         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WIAPP\%2703\%20-\%20Deconstructing\%20the\%20Kazaa\%20network.pdf},
  11317         author = {Leibowitz, Nathaniel and Ripeanu, Matei and Wierzbicki, Adam}
  11318 }
  11319 @conference {wright03,
  11320         title = {Defending Anonymous Communication Against Passive Logging Attacks},
  11321         booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy},
  11322         year = {2003},
  11323         month = may,
  11324         pages = {28--43},
  11325         publisher = {IEEE Computer Society  Washington, DC, USA},
  11326         organization = {IEEE Computer Society  Washington, DC, USA},
  11327         abstract = {We study the threat that passive logging attacks pose to anonymous communications. Previous work analyzed these attacks under limiting assumptions. We first describe a possible defense that comes from breaking the assumption of uniformly random path selection. Our analysis shows that the defense improves anonymity in the static model, where nodes stay in the system, but fails in a dynamic model, in which nodes leave and join. Additionally, we use the dynamic model to show that the intersection attack creates a vulnerability in certain peer-to-peer systems for anonymous communications. We present simulation results that show that attack times are significantly lower in practice than the upper bounds given by previous work. To determine whether users' web traffic has communication patterns required by the attacks, we collected and analyzed the web requests of users. We found that, for our study, frequent and repeated communication to the same web site is common},
  11328         www_section = {attack, P2P},
  11329         isbn = {0-7695-1940-7},
  11330         url = {http://portal.acm.org/citation.cfm?id=830556},
  11331         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright-passive.pdf , https://git.gnunet.org/bibliography.git/plain/docs/wright-passive2.pdf},
  11332         author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}
  11333 }
  11334 @conference {863960,
  11335         title = {A delay-tolerant network architecture for challenged internets},
  11336         booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications, technologies, architectures, and protocols for computer communications},
  11337         year = {2003},
  11338         pages = {27--34},
  11339         publisher = {ACM},
  11340         organization = {ACM},
  11341         address = {New York, NY, USA},
  11342         abstract = {The highly successful architecture and protocols of today's Internet may operate poorly in environments characterized by very long delay paths and frequent network partitions. These problems are exacerbated by end nodes with limited power or memory resources. Often deployed in mobile and extreme environments lacking continuous connectivity, many such networks have their own specialized protocols, and do not utilize IP. To achieve interoperability between them, we propose a network architecture and application interface structured around optionally-reliable asynchronous message forwarding, with limited expectations of end-to-end connectivity and node resources. The architecture operates as an overlay above the transport layers of the networks it interconnects, and provides key services such as in-network data storage and retransmission, interoperable naming, authenticated forwarding and a coarse-grained class of service},
  11343         isbn = {1-58113-735-4},
  11344         doi = {10.1145/863955.863960},
  11345         url = {http://portal.acm.org/citation.cfm?id=863960$\#$},
  11346         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IRB-TR-03-003.pdf},
  11347         author = {Fall, Kevin}
  11348 }
  11349 @booklet { roca03design,
  11350         title = {Design and evaluation of a low density generator matrix},
  11351         year = {2003},
  11352         abstract = {Traditional small block Forward Error Correction (FEC) codes, like the Reed-Solomon erasure (RSE) code, are known to raise efficiency problems, in particular when they are applied to the Asynchronous Layered Coding (ALC) reliable multicast protocol. In this paper we describe the design of a simple large block Low Density Generator Matrix (LDGM) codec, a particular case of LDPC code, which is capable of operating on source blocks that are several tens of megabytes long. We also explain how the iterative decoding feature of LDGM/LDPC can be used to protect a large number of small independent objects during time-limited partially-reliable sessions. We illustrate this feature with an example derived from a video streaming scheme over ALC. We then evaluate our LDGM codec and compare its performances with a well known RSE codec. Tests focus on the global efficiency and on encoding/decoding performances. This paper deliberately skips theoretical aspects to focus on practical results. It shows that LDGM/LDPC open many opportunities in the area of bulk data multicasting},
  11353         www_section = {ALC, FEC, large block FEC codes, LDGM, LDPC, reliable multicast},
  11354         isbn = {978-3-540-20051-2},
  11355         doi = {10.1007/b13249},
  11356         url = {http://www.springerlink.com/content/tdemq6m8b20320hb/},
  11357         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ngc03_ldpc_slides_03sep18_4s.pdf},
  11358         author = {Vincent Roca and Zainab Khallouf and Julien Laboure}
  11359 }
  11360 @booklet {_,
  11361         title = {A DHT-based Backup System},
  11362         year = {2003},
  11363         abstract = {Distributed hashtables have been proposed as a way to simplify the construction of large-scale distributed applications (e.g. [1,6]). DHTs are completely decentralized systems that provide block storage on a changing collection of nodes spread throughout the Internet. Each block is identified by a unique key. DHTs spread the load of storing and serving blocks across all of the active nodes and keep the blocks available as nodes join and leave the system.
  11364 
  11365 This paper presents the design and implementation of a cooperative off-site backup system, Venti-DHash. Venti-DHash is based on a DHT infrastructure and is designed to support recovery of data after a disaster by keeping regular snapshots of filesystems distributed off-site, on peers on the Internet. Whereas conventional backup systems incur significant equipment costs, manual effort and high administrative overhead, we hope that a distributed backup system can alleviate these problems, making backups easy and feasible. By building this system on top of a DHT, the backup application inherits the properties of the DHT, and serves to evaluate the feasibility of using a DHT to build large-scale applications},
  11366         www_section = {backup, distributed hash table},
  11367         url = {http://doc.cat-v.org/plan_9/misc/venti-dhash/},
  11368         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.104.8086.pdf},
  11369         author = {Emil Sit and Josh Cates and Russ Cox}
  11370 }
  11371 @conference {Acquisti03onthe,
  11372         title = {On the Economics of Anonymity},
  11373         booktitle = {Financial Cryptography. Springer-Verlag, LNCS 2742},
  11374         year = {2003},
  11375         pages = {84--102},
  11376         abstract = {Decentralized anonymity infrastructures are still not in wide use today. While there are technical barriers to a secure robust design, our lack of understanding of the incentives to participate in such systems remains a major roadblock. Here we explore some reasons why anonymity systems are particularly hard to deploy, enumerate the incentives to participate either as senders or also as nodes, and build a general model to describe the effects of these incentives. We then describe and justify some simplifying assumptions to make the model manageable, and compare optimal strategies for participants based on a variety of scenarios},
  11377         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.13.5636\&rep=rep1\&type=pdf},
  11378         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.5636.pdf},
  11379         author = {Alessandro Acquisti and Roger Dingledine and Paul Syverson}
  11380 }
  11381 @conference {Buchegger03theeffect,
  11382         title = {The Effect of Rumor Spreading in Reputation Systems for Mobile Ad-Hoc Networks},
  11383         booktitle = {Proceedings of WiOpt {\textquoteleft}03: Modeling and Optimization in Mobile, Ad Hoc and Wireless Networks, Sophia-Antipolis},
  11384         year = {2003},
  11385         abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and forwarding. For individual nodes there are however several advantages resulting from noncooperation, the most obvious being power saving. Nodes that act selfishly or even maliciously pose a threat to availability in mobile ad-hoc networks. Several approaches have been proposed to detect noncooperative nodes. In this paper, we investigate the effect of using rumors with respect to the detection time of misbehaved nodes as well as the robustness of the reputation system against wrong accusations. We propose a Bayesian approach for reputation representation, updates, and view integration. We also present a mechanism to detect and exclude potential lies. The simulation results indicate that by using this Bayesian approach, the reputation system is robust against slander while still benefitting from the speed-up in detection time provided by the use of rumors},
  11386         www_section = {ad-hoc networks, reputation, robustness},
  11387         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.9006},
  11388         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9006_0.pdf},
  11389         author = {Sonja Buchegger and Jean-Yves Le Boudec}
  11390 }
  11391 @conference {2003_1,
  11392         title = {The effect of rumor spreading in reputation systems for mobile ad-hoc networks},
  11393         booktitle = {Proceedings of WiOpt {\textquoteleft}03: Modeling and Optimization in Mobile, Ad Hoc and Wireless Networks},
  11394         year = {2003},
  11395         month = mar,
  11396         address = {Sophia-Antipolis, France},
  11397         abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and forwarding. For individual nodes there are however several advantages resulting from noncooperation, the most obvious being power saving. Nodes that act selfishly or even maliciously pose a threat to availability in mobile ad-hoc networks. Several approaches have been proposed to detect noncooperative nodes. In this paper, we investigate the effect of using rumors with respect to the detection time of misbehaved nodes as well as the robustness of the reputation system against wrong accusations. We propose a Bayesian approach for reputation representation, updates, and view integration. We also present a mechanism to detect and exclude potential lies. The simulation results indicate that by using this Bayesian approach, the reputation system is robust against slander while still benefitting from the speed-up in detection time provided by the use of rumors},
  11398         www_section = {mobile Ad-hoc networks, reputation, reputation system, rumor},
  11399         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WiOpt\%2703\%20-\%20Buchegger\%20\%26\%20Le\%20Boudec\%20-\%20Reputation\%20Systems.pdf},
  11400         author = {Sonja Buchegger and Jean-Yves Le Boudec}
  11401 }
  11402 @conference {Kwon:2003:EPF:827273.829221,
  11403         title = {An Efficient Peer-to-Peer File Sharing Exploiting Hierarchy and Asymmetry},
  11404         booktitle = {SAINT'03. Proceedings of the 2003 Symposium on Applications and the Internet},
  11405         series = {SAINT '03},
  11406         year = {2003},
  11407         month = jan,
  11408         pages = {0--226},
  11409         publisher = {IEEE Computer Society},
  11410         organization = {IEEE Computer Society},
  11411         address = {Orlando, Florida, USA},
  11412         abstract = {Many Peer-to-Peer (P2P) file sharing systems have been proposed to take advantage of high scalability and abundant resources at end-user machines. Previous approaches adopted either simple flooding or routing with complex structures, such as Distributed Hashing Tables (DHT). However, these approaches did not consider the heterogeneous nature of the machines and the hierarchy of networks on the Internet. This paper presents Peer-to-peer Asymmetric file Sharing System (PASS), a novel approach to P2P file sharing, which accounts for the different capabilities and network locations of the participating machines. Our system selects only a portion of high-capacity machines (supernodes) for routing support, and organizes the network by using location information. We show that our key-coverage based directory replication improves the file search performance to a small constant number of routing hops, regardless of the network size},
  11413         www_section = {asymmetry, hierarchy, P2P, pass, peer-to-peer asymmetric file sharing system, peer-to-peer networking},
  11414         isbn = {0-7695-1872-9},
  11415         doi = {http://doi.ieeecomputersociety.org/10.1109/SAINT.2003.1183054},
  11416         url = {http://dl.acm.org/citation.cfm?id=827273.829221},
  11417         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SAINT\%2703\%20-\%20Kwon\%20\%26\%20Ryu.pdf},
  11418         author = {Kwon, Gisik and Ryu, Kyung D.}
  11419 }
  11420 @conference {Kamvar:2003:EAR:775152.775242,
  11421         title = {The EigenTrust algorithm for reputation management in P2P networks},
  11422         booktitle = {WWW'03. Proceedings of the 12th International Conference on World Wide Web},
  11423         series = {WWW '03},
  11424         year = {2003},
  11425         month = may,
  11426         pages = {640--651},
  11427         publisher = {ACM},
  11428         organization = {ACM},
  11429         address = {Budapest, Hungary},
  11430         abstract = {Peer-to-peer file-sharing networks are currently receiving much attention as a means of sharing and distributing information. However, as recent experience shows, the anonymous, open nature of these networks offers an almost ideal environment for the spread of self-replicating inauthentic files. We describe an algorithm to decrease the number of downloads of inauthentic files in a peer-to-peer file-sharing network that assigns each peer a unique global trust value, based on the peer's history of uploads. We present a distributed and secure method to compute global trust values, based on Power iteration. By having peers use these global trust values to choose the peers from whom they download, the network effectively identifies malicious peers and isolates them from the network. In simulations, this reputation system, called EigenTrust, has been shown to significantly decrease the number of inauthentic files on the network, even under a variety of conditions where malicious peers cooperate in an attempt to deliberately subvert the system},
  11431         www_section = {distributed eigenvector computation, peer-to-peer networking, reputation},
  11432         isbn = {1-58113-680-3},
  11433         doi = {http://doi.acm.org/10.1145/775152.775242},
  11434         url = {http://doi.acm.org/10.1145/775152.775242},
  11435         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WWW\%2703\%20-\%20The\%20EigenTrust\%20algorithm.pdf},
  11436         author = {Kamvar, Sepandar D. and Schlosser, Mario T. and Hector Garcia-Molina}
  11437 }
  11438 @conference {948119,
  11439         title = {Establishing pairwise keys in distributed sensor networks},
  11440         booktitle = {CCS '03: Proceedings of the 10th ACM conference on Computer and communications security},
  11441         year = {2003},
  11442         pages = {52--61},
  11443         publisher = {ACM},
  11444         organization = {ACM},
  11445         address = {New York, NY, USA},
  11446         abstract = {Pairwise key establishment is a fundamental security service in sensor networks; it enables sensor nodes to communicate securely with each other using cryptographic techniques. However, due to the resource constraints on sensors, it is infeasible to use traditional key management techniques such as public key cryptography and key distribution center (KDC). To facilitate the study of novel pairwise key predistribution techniques, this paper presents a general framework for establishing pairwise keys between sensors on the basis of a polynomial-based key predistribution protocol [2]. This paper then presents two efficient instantiations of the general framework: a random subset assignment key predistribution scheme and a grid-based key predistribution scheme. The analysis in this paper indicates that these two schemes have a number of nice properties, including high probability (or guarantee) to establish pairwise keys, tolerance of node captures, and low communication overhead. Finally, this paper presents a technique to reduce the computation at sensors required by these schemes},
  11447         www_section = {key management, probabilistic key sharing, sensor networks},
  11448         isbn = {1-58113-738-9},
  11449         doi = {10.1145/948109.948119},
  11450         url = {http://portal.acm.org/citation.cfm?id=948119$\#$},
  11451         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs03-SNKeyMan.pdf},
  11452         author = {Liu, Donggang and Peng Ning}
  11453 }
  11454 @article {2003_2,
  11455         title = {The evolution of altruistic punishment},
  11456         journal = {Proceedings of the National Academy of Sciences of the USA},
  11457         volume = {100},
  11458         year = {2003},
  11459         month = mar,
  11460         pages = {3531--3535},
  11461         abstract = {Both laboratory and field data suggest that people punish noncooperators even in one-shot interactions. Although such {\textquotedblleft}altruistic punishment{\textquotedblright} may explain the high levels of cooperation in human societies, it creates an evolutionary puzzle: existing models suggest that altruistic cooperation among nonrelatives is evolutionarily stable only in small groups. Thus, applying such models to the evolution of altruistic punishment leads to the prediction that people will not incur costs to punish others to provide benefits to large groups of nonrelatives. However, here we show that an important asymmetry between altruistic cooperation and altruistic punishment allows altruistic punishment to evolve in populations engaged in one-time, anonymous interactions. This process allows both altruistic punishment and altruistic cooperation to be maintained even when groups are large and other parameter values approximate conditions that characterize cultural evolution in the small-scale societies in which humans lived for most of our prehistory},
  11462         www_section = {altruistic cooperation, altruistic punishment, cooperation, human society, nonrelatives},
  11463         doi = {10.1073/pnas.0630443100},
  11464         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20\%282003\%29\%20-\%20The\%20evolution\%20of\%20altruistic\%20punishment.pdf},
  11465         author = {Robert Boyd and Herbert Gintis and Samuel Bowles and Peter J. Richerson}
  11466 }
  11467 @article { ebe2003,
  11468         title = {An Excess-Based Economic Model for Resource Allocation in Peer-to-Peer Networks},
  11469         journal = {Wirtschaftsinformatik},
  11470         volume = {3-2003},
  11471         year = {2003},
  11472         month = {June},
  11473         publisher = {Vieweg-Verlag},
  11474         abstract = {This paper describes economic aspects of GNUnet, a peer-to-peer framework for anonymous distributed file-sharing.  GNUnet is decentralized; all nodes are equal peers. In particular, there are no trusted entities in the network. This paper describes an economic model to perform resource allocation and defend against malicious
  11475 participants in this context.  The approach presented does not use credentials or payments; rather, it is based on trust.  The design is much like that of a cooperative game in which peers take the role of players. Nodes must cooperate to achieve individual goals.  In such a scenario, it is important to be able to distinguish between nodes exhibiting friendly behavior and those exhibiting malicious behavior.
  11476 
  11477 GNUnet aims to provide anonymity for its users.  Its design
  11478 makes it hard to link a transaction to the node where it originated from.  While anonymity requirements make a global view of the end-points of a transaction infeasible, the local link-to-link messages can be fully authenticated.  Our economic model is based entirely on this local view of the network and takes only local
  11479 decisions},
  11480         www_section = {anonymity, file-sharing, GNUnet},
  11481         url = {http://grothoff.org/christian/ebe.pdf},
  11482         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ebe.pdf},
  11483         author = {Christian Grothoff}
  11484 }
  11485 @booklet {Peterson03ext3cow:the,
  11486         title = {Ext3cow: The Design, Implementation, and Analysis of Metadata for a Time-Shifting File System},
  11487         year = {2003},
  11488         abstract = {The ext3cow file system, built on Linux's popular ext3 file system, brings snapshot functionality and file versioning to the open-source community. Our implementation of ext3cow has several desirable properties: ext3cow is implemented entirely in the file system and, therefore, does not modify kernel interfaces or change the operation of other file systems; ext3cow provides a time-shifting interface that permits access to data in the past without polluting the file system namespace; and, ext3cow creates versions of files on disk without copying data in memory. Experimental results show that the time-shifting functions of ext3cow do not degrade file system performance. Ext3cow performs comparably to ext3 on many file system benchmarks and trace driven experiments},
  11489         www_section = {file systems},
  11490         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.2545},
  11491         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.2545.pdf},
  11492         author = {Zachary N. J. Peterson and Randal C. Burns}
  11493 }
  11494 @article {2003_3,
  11495         title = {Extremum Feedback with Partial Knowledge},
  11496         volume = {2816/2003},
  11497         year = {2003},
  11498         abstract = {A scalable feedback mechanism to solicit feedback from a potentially very large group of networked nodes is an important building block for many network protocols. Multicast transport protocols use it for negative acknowledgements and for delay and packet loss determination. Grid computing and peer-to-peer applications can use similar approaches to find nodes that are, at a given moment in time, best suited to serve a request. In sensor networks, such mechanisms allow extreme values to be reported in a resource-efficient way.
  11499 In this paper we analyze several extensions to the exponential feedback algorithm [5,6] that provide an optimal way to collect extreme values from a potentially very large group of networked nodes. In contrast to prior work, we focus on how knowledge about the value distribution in the group can be used to optimize the feedback process. We describe the trade-offs that have to be decided upon when using these extensions and provide additional insight into their performance by means of simulation. Furthermore, we briefly illustrate how sample applications can benefit from the proposed mechanisms},
  11500         isbn = {978-3-540-20051-2},
  11501         issn = {0302-9743 },
  11502         doi = {10.1007/b13249},
  11503         journal = lncs,
  11504         url = {http://www.springerlink.com/content/bvelyaew4ukl4aau/},
  11505         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03feedback.pdf},
  11506         author = {Thomas Fuhrmann and J{\"o}rg Widmer}
  11507 }
  11508 @conference {2003_4,
  11509         title = {A game theoretic framework for incentives in P2P systems},
  11510         booktitle = {Proceedings of the 3rd International Conference on Peer-to-Peer Computing},
  11511         year = {2003},
  11512         month = sep,
  11513         pages = {48--56},
  11514         publisher = {IEEE Computer Society},
  11515         organization = {IEEE Computer Society},
  11516         address = {Link{\"o}ping, Sweden},
  11517         abstract = {Peer-to-peer (P2P) networks are self-organizing, distributed systems, with no centralized authority or infrastructure. Because of the voluntary participation, the availability of resources in a P2P system can be highly variable and unpredictable. We use ideas from game theory to study the interaction of strategic and rational peers, and propose a differential service-based incentive scheme to improve the system's performance},
  11518         www_section = {network, P2P, peer-to-peer networking, system performance},
  11519         isbn = {0-7695-2023-5 },
  11520         doi = {10.1109/PTP.2003.1231503},
  11521         url = {https://doi.org/10.1109/PTP.2003.1231503},
  11522         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Buragohain\%2C\%20Agrawal\%20\%26\%20Suri\%20-\%20Incentives\%20in\%20P2P\%20systems.pdf},
  11523         author = {Chiranjeeb Buragohain and Divyakant Agrawal and Subhash Suri}
  11524 }
  11525 @conference { gap,
  11526         title = {gap--Practical Anonymous Networking},
  11527         booktitle = {Designing Privacy Enhancing Technologies},
  11528         year = {2003},
  11529         pages = {141--160},
  11530         publisher = {Springer-Verlag},
  11531         organization = {Springer-Verlag},
  11532         abstract = {This paper describes how anonymity is achieved in GNUnet, a framework for anonymous distributed and secure networking.
  11533 
  11534 The main focus of this work is gap, a simple protocol for anonymous transfer of data which can achieve better anonymity guarantees than many traditional indirection schemes and is additionally more efficient.  gap is based on a new perspective on how to achieve anonymity.  Based on this new perspective it is possible to relax the requirements stated in traditional indirection
  11535 schemes, allowing individual nodes to balance anonymity with efficiency according to their specific needs},
  11536         www_section = {anonymity, GNUnet, installation},
  11537         url = {http://grothoff.org/christian/aff.pdf},
  11538         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/aff.pdf},
  11539         author = {Krista Bennett and Christian Grothoff}
  11540 }
  11541 @conference {diaz:pet2003,
  11542         title = {Generalising Mixes},
  11543         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11544         year = {2003},
  11545         month = mar,
  11546         pages = {18--31},
  11547         publisher = {Springer-Verlag, LNCS 2760},
  11548         organization = {Springer-Verlag, LNCS 2760},
  11549         abstract = {In this paper we present a generalised framework for expressing batching strategies of a mix. First, we note that existing mixes can be represented as functions from the number of messages in the mix to the fraction of messages to be flushed.
  11550 We then show how to express existing mixes in the framework, and then suggest other mixes which arise out of that framework. We note that these cannot be expressed as pool mixes. In particular, we call binomial mix a timed pool mix that tosses coins and uses a probability function that depends on the number of messages inside the mix at the time of flushing. We discuss the properties of this mix},
  11551         www_section = {mix},
  11552         isbn = {978-3-540-20610-1},
  11553         doi = {10.1007/b94512},
  11554         url = {http://www.springerlink.com/content/jvuk0exyqxvcyhvy/},
  11555         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.9155.pdf},
  11556         author = {Claudia Diaz and Andrei Serjantov},
  11557         editor = {Roger Dingledine}
  11558 }
  11559 @conference {danezis:wpes2003,
  11560         title = {Heartbeat Traffic to Counter (n-1) Attacks},
  11561         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2003)},
  11562         year = {2003},
  11563         month = oct,
  11564         publisher = {ACM  New York, NY, USA},
  11565         organization = {ACM  New York, NY, USA},
  11566         address = {Washington, DC, USA},
  11567         abstract = {A dummy traffic strategy is described that can be implemented by mix nodes in an anonymous communication network to detect and counter active (n--1) attacks and their variants. Heartbeat messages are sent anonymously from the mix node back to itself in order to establish its state of connectivity with the rest of the network. In case the mix is under attack, the flow of heartbeat messages is interrupted and the mix takes measures to preserve the quality of the anonymity it provides by introducing decoy messages},
  11568         www_section = {anonymity, flooding attacks},
  11569         isbn = {1-58113-776-1},
  11570         doi = {10.1145/1005140.1005154},
  11571         url = {http://portal.acm.org/citation.cfm?id=1005154},
  11572         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-wpes2003.pdf},
  11573         author = {George Danezis and Len Sassaman}
  11574 }
  11575 @booklet {herbivore:tr,
  11576         title = {Herbivore: A Scalable and Efficient Protocol for Anonymous Communication},
  11577         number = {2003-1890},
  11578         year = {2003},
  11579         month = feb,
  11580         publisher = {Cornell University},
  11581         address = {Ithaca, NY},
  11582         abstract = {Anonymity is increasingly important for networked applications amidst concerns over censorship and privacy. In this paper, we describe Herbivore, a peer-to-peer, scalable, tamper-resilient communication system that provides provable anonymity and privacy. Building on dining cryptographer networks, Herbivore scales by partitioning the network into anonymizing cliques. Adversaries able to monitor all network traffic cannot deduce the identity of a sender or receiver beyond an anonymizing clique. In addition to strong anonymity, Herbivore simultaneously provides high efficiency and scalability, distinguishing it from other anonymous communication protocols. Performance measurements from a prototype implementation show that the system can achieve high bandwidths and low latencies when deployed over the Internet},
  11583         www_section = {anonymity, P2P, privacy},
  11584         url = {http://ecommons.cornell.edu/handle/1813/5606},
  11585         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herbivore-tr.pdf},
  11586         author = {Goel, Sharad and Mark Robson and Milo Polte and Emin G{\"u}n Sirer}
  11587 }
  11588 @article {10.1109/ICPP.2003.1240580,
  11589         title = {HIERAS: A DHT Based Hierarchical P2P Routing Algorithm},
  11590         journal = {International Conference on Parallel Processing},
  11591         year = {2003},
  11592         pages = {0--187},
  11593         publisher = {IEEE Computer Society},
  11594         address = {Los Alamitos, CA, USA},
  11595         abstract = {Routing algorithm has great influence on system overall performance in Peer-to-Peer (P2P) applications. In current DHT based routing algorithms, routing tasks are distributed across all system peers. However, a routing hop could happen between two widely separated peers with high network link latency which greatly increases system routing overheads.
  11596 In this paper, we propose a new P2P routing algorithm---HIERAS---to relieve this problem; it keeps the scalability property of current DHT algorithms and improves system routing performance by introducing a hierarchical structure. In HIERAS, we create several lower level P2P rings besides the highest level P2P ring. A P2P ring is a subset of the overall P2P overlay network. We create P2P rings in such a way that the average link latency between two peers in lower level rings is much smaller than in higher level rings. Routing tasks are first executed in lower level rings before they go up to higher level rings; a large portion of routing hops previously executed in the global P2P ring are now replaced by hops in lower level rings, thus routing overheads can be reduced. The simulation results show that the HIERAS routing algorithm can significantly improve P2P system routing performance},
  11597         www_section = {distributed hash table, P2P},
  11598         issn = {0190-3918},
  11599         doi = {10.1109/ICPP.2003.1240580},
  11600         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/ICPP.2003.1240580},
  11601         author = {Zhiyong Xu and Rui Min and Yiming Hu}
  11602 }
  11603 @conference {Blake:2003:HAS:1251054.1251055,
  11604         title = {High Availability, Scalable Storage, Dynamic Peer Networks: Pick Two},
  11605         booktitle = {HotOS IX--Proceedings of the 9th conference on Hot Topics in Operating Systems },
  11606         year = {2003},
  11607         month = may,
  11608         pages = {1--1},
  11609         publisher = {USENIX Association},
  11610         organization = {USENIX Association},
  11611         address = {Lihue, Hawaii, USA},
  11612         abstract = {Peer-to-peer storage aims to build large-scale, reliable and available storage from many small-scale unreliable, low-availability distributed hosts. Data redundancy is the key to any data guarantees. However, preserving redundancy in the face of highly dynamic membership is costly. We use a simple resource usage model and measured behavior from the Gnutella file-sharing network to argue that large-scale cooperative storage is limited by likely dynamics and cross-system bandwidth -- not by local disk space. We examine some bandwidth optimization strategies like delayed response to failures, admission control, and load-shifting and find that they do not alter the basic problem. We conclude that when redundancy, data scale, and dynamics are all high, the needed cross-system bandwidth is unreasonable},
  11613         www_section = {distributed hosts, dynamic peer network, peer-to-peer storage, redundancy},
  11614         url = {http://dl.acm.org/citation.cfm?id=1251054.1251055},
  11615         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotOS\%20IX\%20-\%20High\%20available\%2C\%20scalable\%20storage\%2C\%20dynamic\%20peer\%20networks.pdf},
  11616         author = {Blake, Charles and Rodrigues, Rodrigo}
  11617 }
  11618 @conference {10.1109/PTP.2003.1231513,
  11619         title = {Identity Crisis: Anonymity vs. Reputation in P2P Systems},
  11620         booktitle = {P2P'03. Proceedings of the 3rd International Conference on Peer-to-Peer Computing},
  11621         year = {2003},
  11622         month = sep,
  11623         pages = {0--134},
  11624         publisher = {IEEE Computer Society},
  11625         organization = {IEEE Computer Society},
  11626         address = {Link{\"o}ping, Sweden},
  11627         abstract = {The effectiveness of reputation systems for peer-to-peer resource-sharing networks is largely dependent on the reliability of the identities used by peers in the network. Much debate has centered around how closely one's pseudoidentity in the network should be tied to their real-world identity, and how that identity is protected from malicious spoofing. In this paper we investigate the cost in efficiency of two solutions to the identity problem for peer-to-peer reputation systems. Our results show that, using some simple mechanisms, reputation systems can provide a factor of 4 to 20 improvement in performance over no reputation system, depending on the identity model used},
  11628         www_section = {anonymity, identity, identity model, P2P, peer-to-peer networking, reliability, reputation, reputation system},
  11629         isbn = {0-7695-2023-5},
  11630         doi = {http://doi.ieeecomputersociety.org/10.1109/PTP.2003.1231513},
  11631         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Identity\%20crisis\%3A\%20anonymity\%20vs\%20reputation.pdf},
  11632         author = {Marti, Sergio and Hector Garcia-Molina}
  11633 }
  11634 @conference {Gummadi:2003:IDR:863955.863998,
  11635         title = {The impact of DHT routing geometry on resilience and proximity},
  11636         booktitle = {SIGCOMM '03--Proceedings of the 2003 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications},
  11637         series = {SIGCOMM '03},
  11638         year = {2003},
  11639         month = aug,
  11640         pages = {381--394},
  11641         publisher = {ACM},
  11642         organization = {ACM},
  11643         address = {Karlsruhe, Germany},
  11644         abstract = {The various proposed DHT routing algorithms embody several different underlying routing geometries. These geometries include hypercubes, rings, tree-like structures, and butterfly networks. In this paper we focus on how these basic geometric approaches affect the resilience and proximity properties of DHTs. One factor that distinguishes these geometries is the degree of flexibility they provide in the selection of neighbors and routes. Flexibility is an important factor in achieving good static resilience and effective proximity neighbor and route selection. Our basic finding is that, despite our initial preference for more complex geometries, the ring geometry allows the greatest flexibility, and hence achieves the best resilience and proximity performance},
  11645         www_section = {distributed hash table, flexibility, routing geometry},
  11646         isbn = {1-58113-735-4},
  11647         doi = {http://doi.acm.org/10.1145/863955.863998},
  11648         url = {http://doi.acm.org/10.1145/863955.863998},
  11649         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2703\%20-\%20The\%20impact\%20of\%20DHT\%20routing\%20geometry\%20on\%20resilience\%20and\%20proximity.pdf},
  11650         author = {Krishna Phani Gummadi and Gummadi, Ramakrishna and Steven D. Gribble and Sylvia Paul Ratnasamy and S Shenker and Ion Stoica}
  11651 }
  11652 @conference {clayton:pet2003,
  11653         title = {Improving Onion Notation},
  11654         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11655         year = {2003},
  11656         month = mar,
  11657         pages = {81--87},
  11658         publisher = {Springer-Verlag, LNCS 2760},
  11659         organization = {Springer-Verlag, LNCS 2760},
  11660         abstract = {Several different notations are used in the literature of MIX networks to describe the nested encrypted structures now widely known as "onions". The shortcomings of these notations are described and a new notation is proposed, that as well as having some advantages from a typographical point of view, is also far clearer to read and to reason about. The proposed notation generated a lively debate at the PET2003 workshop and the various views, and alternative proposals, are reported upon. The workshop participants did not reach any consensus on improving onion notation, but there is now a heightened awareness of the problems that can arise with existing representations},
  11661         www_section = {onion routing},
  11662         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.5965},
  11663         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clayton-pet2003.pdf},
  11664         author = {Richard Clayton},
  11665         editor = {Roger Dingledine}
  11666 }
  11667 @conference {2003_5,
  11668         title = {Incentives build robustness in BitTorrent},
  11669         booktitle = {NetEcon'03--Proceedings of the Workshop on Economics of Peer-to-Peer Systems },
  11670         year = {2003},
  11671         month = jun,
  11672         address = {Berkeley, CA, USA},
  11673         abstract = {The BitTorrent file distribution system uses tit-for-tat as a method of seeking Pareto efficiency. It achieves a higher level of robustness and resource utilization than any currently known cooperative technique. We explain what BitTorrent does, and how economic methods are used to achieve that goal},
  11674         www_section = {BitTorrent, resource utilization, robustness},
  11675         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2703\%20-\%20Cohen\%20-\%20Incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
  11676         author = {Bram Cohen}
  11677 }
  11678 @conference {Lai03incentivesfor,
  11679         title = {Incentives for Cooperation in Peer-to-Peer Networks},
  11680         booktitle = {P2PECON. Proceedings of the First Workshop on Economics of Peer-to-Peer Systems},
  11681         year = {2003},
  11682         month = jun,
  11683         address = {Berkeley, California, USA},
  11684         abstract = {In this paper, our contributions are to generalize from the traditional symmetric EPD to the asymmetric transactions of P2P applications, map out the design space of EPD-based incentive techniques, and simulate a subset of these techniques. Our findings are as follows: Incentive techniques relying on private history (where entities only use their private histories of entities' actions) fail as the population size increases},
  11685         www_section = {P2P, privacy},
  11686         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.1949},
  11687         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incentives-for-cooperation-in_0.pdf},
  11688         author = {Kevin Lai and Michal Feldman and Ion Stoica and John Chuang}
  11689 }
  11690 @conference {Ahn03k-anonymousmessage,
  11691         title = {k-Anonymous Message Transmission},
  11692         booktitle = {Conference on Computer and Communications Security},
  11693         year = {2003},
  11694         month = oct,
  11695         publisher = {ACM  New York, NY, USA},
  11696         organization = {ACM  New York, NY, USA},
  11697         address = {Washington D.C., USA},
  11698         abstract = {Informally, a communication protocol is sender k--anonymous if it can guarantee that an adversary, trying to determine the sender of a particular message, can only narrow down its search to a set of k suspects. Receiver k-anonymity places a similar guarantee on the receiver: an adversary, at best, can only narrow down the possible receivers to a set of size k. In this paper we introduce the notions of sender and receiver k-anonymity and consider their applications. We show that there exist simple and efficient protocols which are k-anonymous for both the sender and the receiver in a model where a polynomial time adversary can see all traffic in the network and can control up to a constant fraction of the participants. Our protocol is provably secure, practical, and does not require the existence of trusted third parties. This paper also provides a conceptually simple augmentation to Chaum's DC-Nets that adds robustness against adversaries who attempt to disrupt the protocol through perpetual transmission or selective non-participation},
  11699         isbn = {1-58113-738-9},
  11700         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.9348\&rep=rep1\&type=url\&i=2},
  11701         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/k-anonymous_ccs2003.pdf},
  11702         author = {Luis von Ahn and Andrew Bortz and Nicholas J. Hopper}
  11703 }
  11704 @conference {2003_6,
  11705         title = {KARMA: a Secure Economic Framework for P2P Resource Sharing},
  11706         booktitle = {P2PECON'03. Proceedings of the 1st Workshop on Economics of Peer-to-Peer Systems},
  11707         year = {2003},
  11708         month = jun,
  11709         address = {Berkeley, CA, USA},
  11710         abstract = {Peer-to-peer systems are typically designed around the assumption that all peers will willingly contribute resources to a global pool. They thus suffer from freeloaders, that is, participants who consume many more resources than they contribute. In this paper, we propose a general economic framework for avoiding freeloaders in peer-to-peer systems. Our system works by keeping track of the resource consumption and resource contribution of each participant. The overall standing of each},
  11711         www_section = {economic framework, freeloader, karma, p2p resource sharing},
  11712         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20KARMA.pdf},
  11713         author = {Vivek Vishnumurthy and Sangeeth Chandrakumar and Emin G{\"u}n Sirer}
  11714 }
  11715 @conference {Gupta03kelips:building,
  11716         title = {Kelips: Building an efficient and stable P2P DHT through increased memory and background overhead},
  11717         booktitle = {Proceedings of the 2nd International Workshop on Peer-to-Peer Systems (IPTPS '03)},
  11718         year = {2003},
  11719         abstract = {A peer-to-peer (p2p) distributed hash table (DHT) system allows hosts to join and fail silently (or leave), as well as to insert and retrieve files (objects). This paper explores a new point in design space in which increased memory usage and constant background communication overheads are tolerated to reduce file lookup times and increase stability to failures and churn. Our system, called Kelips, uses peer-to-peer gossip to partially replicate file index information. In Kelips, (a) under normal conditions, file lookups are resolved with O(1) time and complexity (i.e., independent of system size), and (b) membership changes (e.g., even when a large number of nodes fail) are detected and disseminated to the system quickly. Per-node memory requirements are small in medium-sized systems. When there are failures, lookup success is ensured through query rerouting. Kelips achieves load balancing comparable to existing systems. Locality is supported by using topologically aware gossip mechanisms. Initial results of an ongoing experimental study are also discussed},
  11720         www_section = {distributed hash table, P2P},
  11721         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.3464},
  11722         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.3464.pdf},
  11723         author = {Indranil Gupta and Kenneth P. Birman and Prakash Linga and Alan Demers and Robbert Van Renesse}
  11724 }
  11725 @book {2003_7,
  11726         title = {Koorde: A Simple degree-optimal distributed hash table},
  11727         booktitle = {Peer-to-Peer Systems II},
  11728         series = {Lecture Notes in Computer Science},
  11729         volume = {2735/2003},
  11730         year = {2003},
  11731         pages = {98--107},
  11732         publisher = {Springer },
  11733         organization = {Springer },
  11734         address = {Berlin / Heidelberg},
  11735         abstract = {Koorde is a new distributed hash table (DHT) based on Chord [15] and the de Bruijn graphs [2]. While inheriting the simplicity of Chord, Koorde meets various lower bounds, such as O(log n) hops per lookup request with only 2 neighbors per node (where n is the number of nodes in the DHT), and O(log n/log log n) hops per lookup request with O(log n) neighbors per node},
  11736         www_section = {de Bruijn graph, distributed hash table, Koorde},
  11737         doi = {10.1007/b11823},
  11738         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koorde.pdf},
  11739         author = {Frans M. Kaashoek and David Karger}
  11740 }
  11741 @booklet { turner03lightweight,
  11742         title = {A Lightweight Currency Paradigm for the P2P Resource Market},
  11743         year = {2003},
  11744         abstract = {A P2P resource market is a market in which peers trade resources (including storage, bandwidth and CPU cycles) and services with each other. We propose a specific paradigm for a P2P resource market. This paradigm has five key components: (i) pairwise trading market, with peers setting their own prices for offered resources; (ii) multiple currency economy, in which any peer can issue its own currency; (iii) no legal recourse, thereby limiting the transaction costs in trades; (iv) a simple, secure application-layer protocol; and (v) entity identification based on the entity's unique public key. We argue that the paradigm can lead to a flourishing P2P resource market, allowing applications to tap into the huge pool of surplus peer resources. We illustrate the paradigm and its corresponding Lightweight Currency Protocol (LCP) with several application examples},
  11745         www_section = {P2P},
  11746         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.1309},
  11747         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LightweightParadigm.pdf},
  11748         author = {David A. Turner and Keith W. Ross}
  11749 }
  11750 @article {Eugster:2003:LPB:945506.945507,
  11751         title = {Lightweight probabilistic broadcast},
  11752         journal = {ACM Trans. Comput. Syst.},
  11753         volume = {21},
  11754         year = {2003},
  11755         month = {November},
  11756         pages = {341--374},
  11757         publisher = {ACM},
  11758         address = {New York, NY, USA},
  11759         www_section = {Broadcast, buffering, garbage collection, gossip, noise, randomization, reliability, scalability},
  11760         issn = {0734-2071},
  11761         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lightweight_prob_broadcast.pdf},
  11762         author = {Patrick Eugster and Rachid Guerraoui and Sidath B. Handurukande and Petr Kouznetsov and Anne-Marie Kermarrec}
  11763 }
  11764 @conference {864000,
  11765         title = {Making gnutella-like P2P systems scalable},
  11766         booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications, technologies, architectures, and protocols for computer communications},
  11767         year = {2003},
  11768         pages = {407--418},
  11769         publisher = {ACM},
  11770         organization = {ACM},
  11771         address = {New York, NY, USA},
  11772         abstract = {Napster pioneered the idea of peer-to-peer file sharing, and supported it with a centralized file search facility. Subsequent P2P systems like Gnutella adopted decentralized search algorithms. However, Gnutella's notoriously poor scaling led some to propose distributed hash table solutions to the wide-area file search problem. Contrary to that trend, we advocate retaining Gnutella's simplicity while proposing new mechanisms that greatly improve its scalability. Building upon prior research [1, 12, 22], we propose several modifications to Gnutella's design that dynamically adapt the overlay topology and the search algorithms in order to accommodate the natural heterogeneity present in most peer-to-peer systems. We test our design through simulations and the results show three to five orders of magnitude improvement in total system capacity. We also report on a prototype implementation and its deployment on a testbed},
  11773         www_section = {distributed hash table, Gnutella, P2P},
  11774         isbn = {1-58113-735-4},
  11775         doi = {10.1145/863955.864000},
  11776         url = {http://portal.acm.org/citation.cfm?id=864000$\#$},
  11777         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.5444.pdf},
  11778         author = {Chawathe, Yatin and Breslau, Lee and Lanham, Nick and S Shenker}
  11779 }
  11780 @conference {1090700,
  11781         title = {Metadata Efficiency in Versioning File Systems},
  11782         booktitle = {FAST '03: Proceedings of the 2nd USENIX Conference on File and Storage Technologies},
  11783         year = {2003},
  11784         pages = {43--58},
  11785         publisher = {USENIX Association},
  11786         organization = {USENIX Association},
  11787         address = {Berkeley, CA, USA},
  11788         abstract = {Versioning file systems retain earlier versions of modified files, allowing recovery from user mistakes or system corruption. Unfortunately, conventional versioning systems do not efficiently record large numbers of versions. In particular, versioned metadata can consume as much space as versioned data. This paper examines two space-efficient metadata structures for versioning file systems and describes their integration into the Comprehensive Versioning File System (CVFS), which keeps all versions of all files. Journal-based metadata encodes each metadata version into a single journal entry; CVFS uses this structure for inodes and indirect blocks, reducing the associated space requirements by 80\%. Multiversion b-trees extend each entry's key with a timestamp and keep current and historical entries in a single tree; CVFS uses this structure for directories, reducing the associated space requirements by 99\%. Similar space reductions are predicted via trace analysis for other versioning strategies (e.g., on-close versioning). Experiments with CVFS verify that its current-version performance is similar to that of non-versioning file systems while reducing overall space needed for history data by a factor of two. Although access to historical versions is slower than conventional versioning systems, checkpointing is shown to mitigate and bound this effect},
  11789         www_section = {file systems},
  11790         url = {http://portal.acm.org/citation.cfm?id=1090694.1090700$\#$},
  11791         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fast03.pdf},
  11792         author = {Soules, Craig A. N. and Goodson, Garth R. and Strunk, John D. and Ganger, Gregory R.}
  11793 }
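%
% Illustrative sketch (not from the paper; kept as a .bib comment): a minimal, hypothetical
% Python model of the "journal-based metadata" idea described in the entry above -- every
% metadata change is appended as a timestamped journal entry, and a historical version of an
% inode is rebuilt by replaying entries up to the requested time. Class and field names are
% illustrative assumptions, not CVFS's actual structures.
%
%   import time
%
%   class MetadataJournal:
%       """Append-only journal; one entry per metadata change (illustrative only)."""
%       def __init__(self):
%           self.entries = []                      # (timestamp, inode, field, value)
%
%       def record(self, inode, field, value, ts=None):
%           self.entries.append((time.time() if ts is None else ts, inode, field, value))
%
%       def version_at(self, inode, ts):
%           """Rebuild the metadata of `inode` as it looked at time `ts` by replay."""
%           state = {}
%           for entry_ts, ino, field, value in self.entries:
%               if entry_ts <= ts and ino == inode:
%                   state[field] = value
%           return state
%
%   journal = MetadataJournal()
%   journal.record(7, "size", 1024, ts=1.0)
%   journal.record(7, "size", 4096, ts=2.0)
%   assert journal.version_at(7, 1.5) == {"size": 1024}
%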
  11794 @conference {newman:pet2003,
  11795         title = {Metrics for Traffic Analysis Prevention},
  11796         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11797         year = {2003},
  11798         month = mar,
  11799         pages = {48--65},
  11800         publisher = {Springer-Verlag, LNCS 2760},
  11801         organization = {Springer-Verlag, LNCS 2760},
  11802         abstract = {This paper considers systems for Traffic Analysis Prevention (TAP) in a theoretical model. It considers TAP based on padding and rerouting of messages and describes the effects each has on the difference between the actual and the observed traffic matrix (TM). The paper introduces an entropy-based approach to the amount of uncertainty a global passive adversary has in determining the actual TM, or alternatively, the probability that the actual TM has a property of interest. Unlike previous work, the focus is on determining the overall amount of anonymity a TAP system can provide, or the amount it can provide for a given cost in padding and rerouting, rather than on the amount of protection afforded particular communications},
  11803         www_section = {traffic analysis, traffic matrix},
  11804         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steinbrecher-pet2003_0.pdf},
  11805         author = {Richard E. Newman and Ira S. Moskowitz and Paul Syverson and Andrei Serjantov},
  11806         editor = {Roger Dingledine}
  11807 }
  11808 @booklet {mixmaster-spec,
  11809         title = {Mixmaster Protocol --- Version 2},
  11810         year = {2003},
  11811         month = {July},
  11812         abstract = {Most e-mail security protocols only protect the message body, leaving useful information such as the identities of the conversing parties, sizes of messages and frequency of message exchange open to adversaries. This document describes Mixmaster (version 2), a mail transfer protocol designed to protect electronic mail against traffic
  11813 analysis.
  11814 
  11815 Mixmaster is based on D. Chaum's mix-net protocol. A mix (remailer) is a service that forwards messages, using public key
  11816 cryptography to hide the correlation between its inputs and outputs. Sending messages through sequences of remailers achieves anonymity and unobservability of communications against a powerful adversary},
  11817         www_section = {electronic mail, public key cryptography, traffic analysis},
  11818         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freehaven.net-anonbib-cache-mixmaster-spec.txt.pdf},
  11819         author = {Ulf M{\"o}ller and Lance Cottrell and Peter Palfrader and Len Sassaman}
  11820 }
  11821 @conference {Danezis03mixminion:design,
  11822         title = {Mixminion: Design of a Type III Anonymous Remailer Protocol},
  11823         booktitle = {In Proceedings of the 2003 IEEE Symposium on Security and Privacy},
  11824         year = {2003},
  11825         pages = {2--15},
  11826         abstract = {We present Mixminion, a message-based anonymous remailer protocol with secure single-use reply blocks. Mix nodes cannot distinguish Mixminion forward messages from reply messages, so forward and reply messages share the same anonymity set. We add directory servers that allow users to learn public keys and performance statistics of participating remailers, and we describe nymservers that provide long-term pseudonyms using single-use reply blocks as a primitive. Our design integrates link encryption between remailers to provide forward anonymity. Mixminion works in a real-world Internet environment, requires little synchronization or coordination between nodes, and protects against known anonymity-breaking attacks as well as or better than other systems with similar design parameters},
  11827         url = {http://mixminion.net/minion-design.pdf},
  11828         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/minion-design.pdf},
  11829         author = {George Danezis and Roger Dingledine and Nick Mathewson}
  11830 }
  11831 @conference {danezis:pet2003,
  11832         title = {Mix-networks with Restricted Routes},
  11833         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11834         year = {2003},
  11835         month = mar,
  11836         pages = {1--17},
  11837         publisher = {Springer-Verlag, LNCS 2760},
  11838         organization = {Springer-Verlag, LNCS 2760},
  11839         abstract = {We present a mix network topology that is based on sparse expander graphs, with each mix only communicating with a few neighbouring others. We analyse the anonymity such networks provide, and compare it with fully connected mix networks and mix cascades. We prove that such a topology is efficient since it only requires the route length of messages to be relatively small in comparison with the number of mixes to achieve maximal anonymity. Additionally, mixes can resist intersection attacks while their batch size, that is directly linked to the latency of the network, remains constant. A worked example of a network is also presented to illustrate how these results can be applied to create secure mix networks in practice},
  11840         www_section = {anonymity, mix cascades, traffic analysis},
  11841         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.6.1188},
  11842         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2003.pdf},
  11843         author = {George Danezis},
  11844         editor = {Roger Dingledine}
  11845 }
  11846 @conference {steinbrecher:pet2003,
  11847         title = {Modelling Unlinkability},
  11848         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  11849         year = {2003},
  11850         month = mar,
  11851         pages = {32--47},
  11852         publisher = {Springer-Verlag, LNCS 2760},
  11853         organization = {Springer-Verlag, LNCS 2760},
  11854         abstract = {While several proposals have been made to define and measure anonymity (e.g., with information theory, formal languages and logics), unlinkability has not been modelled generally and formally. In contrast to anonymity, unlinkability is not restricted to persons. In fact, the unlinkability of arbitrary items can be measured. In this paper we try to formalise the notion of unlinkability, give a refinement of anonymity definitions based on this formalisation and show the impact of unlinkability on anonymity. We choose information theory as a method to describe unlinkability because it allows an easy probabilistic description. As an illustration for our formalisation we describe its meaning for communication systems},
  11855         www_section = {anonymity, unlinkability},
  11856         isbn = {978-3-540-20610-1},
  11857         doi = {10.1007/b94512},
  11858         url = {http://www.springerlink.com/content/dxteg659uf2jtdd7/},
  11859         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steinbrecher-pet2003.pdf},
  11860         author = {Sandra Steinbrecher and Stefan K{\"o}psell},
  11861         editor = {Roger Dingledine}
  11862 }
  11863 @conference {Li:2003:MRQ:958491.958500,
  11864         title = {Multi-dimensional range queries in sensor networks},
  11865         booktitle = {Proceedings of the 1st international conference on Embedded networked sensor systems},
  11866         series = {SenSys '03},
  11867         year = {2003},
  11868         pages = {63--75},
  11869         publisher = {ACM},
  11870         organization = {ACM},
  11871         address = {New York, NY, USA},
  11872         www_section = {distributed hash table, multi-dimensional range queries, range queries},
  11873         isbn = {1-58113-707-9},
  11874         doi = {10.1145/958491.958500},
  11875         url = {http://doi.acm.org/10.1145/958491.958500},
  11876         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/eScholarship\%20UC\%20item\%204x6723n2.pdf},
  11877         author = {Li, Xin and Kim, Young Jin and Govindan, Ramesh and Hong, Wei}
  11878 }
  11879 @conference {Conrad03multiplelanguage,
  11880         title = {Multiple language family support for programmable network systems},
  11881         booktitle = {In Proceedings of the 5th Annual International Working Conference on Active Networks (IWAN)},
  11882         year = {2003},
  11883         abstract = {Various programmable networks have been designed and implemented during the last couple of years. Many of them are focused on a single programming language only. This limitation might{\textemdash}to a certain extent{\textemdash}hinder the productivity of service modules being programmed for such networks. Therefore, the concurrent support of service modules written in multiple programming languages was investigated within the FlexiNet project. Basically, support for three major programming paradigms was incorporated into FlexiNet: compiled programming languages like C, interpreted languages (e.g., Java), and hardware description languages such as VHDL. The key concept can be seen in an integral interface that is used by all three programming languages. This leads to a configuration scheme which is totally transparent to the programming languages used to develop the service. In order to get a better idea about the impact of the programming language used, some measurement experiments were conducted},
  11884         www_section = {flexible service platforms, programmable networks},
  11885         isbn = {978-3-540-21250-8},
  11886         doi = {10.1007/b96396},
  11887         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.68.3301},
  11888         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scholler03language.pdf},
  11889         author = {Michael Conrad and Marcus Schoeller and Thomas Fuhrmann and Gerhard Bocksch and Martina Zitterbart}
  11890 }
  11891 @conference {Gay03thenesc,
  11892         title = {The nesC language: A holistic approach to networked embedded systems},
  11893         booktitle = {In Proceedings of Programming Language Design and Implementation (PLDI)},
  11894         year = {2003},
  11895         pages = {1--11},
  11896         abstract = {We present nesC, a programming language for networked embedded systems that represent a new design space for application developers. An example of a networked embedded system is a sensor network, which consists of (potentially) thousands of tiny, low-power "motes," each of which execute concurrent, reactive programs that must operate with severe memory and power constraints. nesC's contribution is to support the special needs of this domain by exposing a programming model that incorporates event-driven execution, a flexible concurrency model, and component-oriented application design. Restrictions on the programming model allow the nesC compiler to perform whole-program analyses, including data-race detection (which improves reliability) and aggressive function inlining (which reduces resource consumption). nesC has been used to implement TinyOS, a small operating system for sensor networks, as well as several significant sensor applications. nesC and TinyOS have been adopted by a large number of sensor network research groups, and our experience and evaluation of the language shows that it is effective at supporting the complex, concurrent programming style demanded by this new class of deeply networked systems},
  11897         www_section = {data races, nesC, TinyOS},
  11898         doi = {10.1145/781131.781133},
  11899         url = {http://portal.acm.org/citation.cfm?id=781133},
  11900         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.127.9488.pdf},
  11901         author = {David Gay and Matt Welsh and Philip Levis and Eric Brewer and Robert Von Behren and Culler, David}
  11902 }
  11903 @booklet {Fuhrmann_networkservices,
  11904         title = {Network Services for the Support of Very-Low-Resource Devices},
  11905         year = {2003},
  11906         abstract = {Visions of future computing scenarios envisage a multitude of very-low-resource devices linked by power-efficient wireless communication means. This paper presents our vision of such a scenario. From this vision requirements are derived for an infrastructure that is able to satisfy the largely differing needs of these devices. The paper also shows how innovative, collaborating applications between distributed sensors and actuators can arise from such an infrastructure. The realization of such innovative applications is illustrated with two examples of straightforward services that have been implemented with the AMnet infrastructure that is currently being developed in the FlexiNet project. Additionally, first performance measurements for one of these services are given. Index terms {\textemdash} Bluetooth, Programmable networks, Sensor-actuator networks},
  11907         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.186},
  11908         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASWN2003.pdf},
  11909         author = {Thomas Fuhrmann and Till Harbaum and Martina Zitterbart}
  11910 }
  11911 @conference {Bauer03newcovert,
  11912         title = {New Covert Channels in HTTP: Adding Unwitting Web Browsers to Anonymity Sets},
  11913         booktitle = {In Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2003)},
  11914         year = {2003},
  11915         pages = {72--78},
  11916         publisher = {ACM Press},
  11917         organization = {ACM Press},
  11918         abstract = {This paper presents new methods enabling anonymous communication on the Internet. We describe a new protocol that allows us to create an anonymous overlay network by exploiting the web browsing activities of regular users. We show that the overlay network provides an anonymity set greater than the set of senders and receivers in a realistic threat model. In particular, the protocol provides unobservability in our threat model},
  11919         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.6246\&rep=rep1\&type=pdf},
  11920         author = {Matthias Bauer}
  11921 }
  11922 @conference {bauer:wpes2003,
  11923         title = {New Covert Channels in HTTP: Adding Unwitting Web Browsers to Anonymity Sets},
  11924         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2003)},
  11925         year = {2003},
  11926         month = {October},
  11927         publisher = {ACM  New York, NY, USA},
  11928         organization = {ACM  New York, NY, USA},
  11929         address = {Washington, DC, USA},
  11930         abstract = {This paper presents new methods enabling anonymous communication on the Internet. We describe a new protocol that allows us to create an anonymous overlay network by exploiting the web browsing activities of regular users. We show that the overlay network provides an anonymity set greater than the set of senders and receivers in a realistic threat model. In particular, the protocol provides unobservability in our threat model},
  11931         www_section = {anonymity, covert channel, HTTP},
  11932         isbn = {1-58113-776-1},
  11933         doi = {10.1145/1005140.1005152},
  11934         url = {http://portal.acm.org/citation.cfm?id=1005152},
  11935         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.6246.pdf},
  11936         author = {Matthias Bauer}
  11937 }
  11938 @booklet {Klinedinst_anew,
  11939         title = {A New Generation of File Sharing Tools},
  11940         year = {2003},
  11941         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.1694\&rep=rep1\&type=pdf},
  11942         author = {Dan Klinedinst}
  11943 }
  11944 @booklet {Fuhrmann_anode,
  11945         title = {A Node Evaluation Mechanism for Service Setup in AMnet},
  11946         year = {2003},
  11947         abstract = {AMnet is a programmable network that aims at the flexible and rapid creation of services within an IP network. Examples for typical services include network layer enhancements e.g. for multicast and mobility, transport layer enhancements e.g. to integrate wireless LANs, and various application layer services e.g. for media transcoding and content distribution. AMnet is based on regular Linux boxes that run an execution environment (EE), a resource monitor, and a basic signaling-engine. These so-called active nodes run the services and provide support for resource-management and module-relocation. Services are created by service modules, small pieces of code, that are executed within the EE. Based on the standard netfilter mechanism of Linux, service modules have full access to the network traffic passing through the active node. This paper describes the evaluation mechanism for service setup in AMnet. In order to determine where a service module can be started, service modules are accompanied by evaluation modules. This allows service module authors to implement various customized strategies for node-selection and service setup. Examples that are supported by the AMnet evaluation mechanism are a) service setup at a fixed position, e.g. as gateway, b) along a fixed path (with variable position along that path), c) at variable positions inside the network with preferences for certain constellations, or d) at an unspecified position, e.g. for modification of multicasted traffic. The required path information is gathered by the AMnodes present in the network. By interaction with the resource monitors of the AMnodes and the service module repository of the respective administrative domain, the AMnet evaluation also ensures overall system security and stability},
  11948         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.8749},
  11949         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03evaluation.pdf},
  11950         author = {Thomas Fuhrmann and Marcus Schoeller and Christina Schmidt and Martina Zitterbart}
  11951 }
  11952 @conference {Tolia03opportunisticuse,
  11953         title = {Opportunistic Use of Content Addressable Storage for Distributed File Systems},
  11954         booktitle = {In Proceedings of the 2003 USENIX Annual Technical Conference},
  11955         year = {2003},
  11956         pages = {127--140},
  11957         abstract = {Motivated by the prospect of readily available Content Addressable Storage (CAS), we introduce the concept of file recipes. A file's recipe is a first-class file system object listing content hashes that describe the data blocks composing the file. File recipes provide applications with instructions for reconstructing the original file from available CAS data blocks. We describe one such application of recipes, the CASPER distributed file system. A CASPER client opportunistically fetches blocks from nearby CAS providers to improve its performance when the connection to a file server traverses a low-bandwidth path. We use measurements of our prototype to evaluate its performance under varying network conditions. Our results demonstrate significant improvements in execution times of applications that use a network file system. We conclude by describing fuzzy block matching, a promising technique for using approximately matching blocks on CAS providers to reconstitute the exact desired contents of a file at a client},
  11958         www_section = {file systems, storage},
  11959         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.740},
  11960         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/casper-usenix2003.pdf},
  11961         author = {Niraj Tolia and Michael Kozuch and Satyanarayanan, Mahadev and Brad Karp and Thomas Bressoud and Adrian Perrig}
  11962 }
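%
% Illustrative sketch (not CASPER's implementation; kept as a .bib comment): the "file
% recipe" idea from the entry above, in toy Python. A recipe is just the ordered list of
% content hashes of a file's blocks, and the file is rebuilt from any store that can serve
% blocks by hash. The block size and all function names are assumptions made for the example.
%
%   import hashlib
%
%   BLOCK_SIZE = 4096                              # illustrative block size
%
%   def make_recipe(data: bytes):
%       """Return the ordered list of SHA-256 hashes of the file's blocks."""
%       blocks = [data[i:i + BLOCK_SIZE] for i in range(0, len(data), BLOCK_SIZE)]
%       return [hashlib.sha256(b).hexdigest() for b in blocks]
%
%   def publish(data: bytes, cas: dict):
%       """Store each block in a content-addressable store (here: a plain dict)."""
%       for i in range(0, len(data), BLOCK_SIZE):
%           block = data[i:i + BLOCK_SIZE]
%           cas[hashlib.sha256(block).hexdigest()] = block
%
%   def reconstruct(recipe, cas: dict) -> bytes:
%       """Fetch every block named in the recipe by hash and concatenate them."""
%       return b"".join(cas[h] for h in recipe)
%
%   cas = {}
%   data = b"example data " * 1000
%   publish(data, cas)
%   assert reconstruct(make_recipe(data), cas) == data
%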
  11963 @conference {792493,
  11964         title = {An Overlay-Network Approach for Distributed Access to SRS},
  11965         booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster Computing and the Grid},
  11966         year = {2003},
  11967         pages = {0--601},
  11968         publisher = {IEEE Computer Society},
  11969         organization = {IEEE Computer Society},
  11970         address = {Washington, DC, USA},
  11971         abstract = {SRS is a widely used system for integrating biological databases. Currently, SRS relies only on locally provided copies of these databases. In this paper we propose a mechanism that also allows the seamless integration of remote databases. To this end, our proposed mechanism splits the existing SRS functionality into two components and adds a third component that enables us to employ peer-to-peer computing techniques to create optimized overlay-networks within which database queries can efficiently be routed. As an additional benefit, this mechanism also reduces the administration effort that would be needed with a conventional approach using replicated databases},
  11972         www_section = {overlay networks, P2P, SRS},
  11973         isbn = {0-7695-1919-9},
  11974         url = {http://portal.acm.org/citation.cfm?id=792493$\#$},
  11975         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03overlaySRS.pdf},
  11976         author = {Thomas Fuhrmann and Andrea Schafferhans and Etzold, Thure}
  11977 }
  11978 @conference {SS03,
  11979         title = {Passive Attack Analysis for Connection-Based Anonymity Systems},
  11980         booktitle = {Proceedings of ESORICS 2003},
  11981         year = {2003},
  11982         month = {October},
  11983         publisher = {Springer Berlin / Heidelberg},
  11984         organization = {Springer Berlin / Heidelberg},
  11985         abstract = {In this paper we consider low latency connection-based anonymity systems which can be used for applications like web browsing or SSH. Although several such systems have been designed and built, their anonymity has so far not been adequately evaluated.
  11986 We analyse the anonymity of connection-based systems against passive adversaries. We give a precise description of two attacks, evaluate their effectiveness, and calculate the amount of traffic necessary to provide a minimum degree of protection against them},
  11987         www_section = {anonymity},
  11988         isbn = {978-3-540-20300-1},
  11989         doi = {10.1007/b13237},
  11990         url = {http://www.springerlink.com/content/8jva7vy8tkert9ur/},
  11991         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.2005.pdf},
  11992         author = {Andrei Serjantov and Peter Sewell}
  11993 }
  11994 @booklet {Loo03peer-to-peerbackup,
  11995         title = {Peer-To-Peer Backup for Personal Area Networks},
  11996         year = {2003},
  11997         abstract = {FlashBack is a peer-to-peer backup algorithm designed for power-constrained devices running in a personal area network (PAN). Backups are performed transparently as local updates initiate the spread of backup data among a subset of the currently available peers. Flashback limits power usage by avoiding flooding and keeping small neighbor sets. Flashback has also been designed to utilize powered infrastructure when possible to further extend device lifetime. We propose our architecture and algorithms, and present initial experimental results that illustrate FlashBack's performance characteristics},
  11998         www_section = {backup, P2P, personal area network},
  11999         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.7820},
  12000         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/052820031647_102.pdf},
  12001         author = {Boon Thau Loo and Anthony LaMarca and Gaetano Borriello}
  12002 }
  12003 @booklet {Aberer03p-grid:a,
  12004         title = {P-Grid: A Self-organizing Structured P2P System},
  12005         year = {2003},
  12007         www_section = {P2P},
  12008         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.5649},
  12009         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.5649.pdf},
  12010         author = {Karl Aberer and Philippe Cudre-Mauroux and Anwitaman Datta and Zoran Despotovic and Manfred Hauswirth and Magdalena Punceva and Roman Schmidt}
  12011 }
  12012 @conference {Cuenca-Acuna03planetp:using,
  12013         title = {PlanetP: Using Gossiping to Build Content Addressable Peer-to-Peer Information Sharing Communities},
  12014         booktitle = {12th IEEE International Symposium on High Performance Distributed Computing (HPDC-12 '03),},
  12015         year = {2003},
  12016         publisher = {IEEE Press},
  12017         organization = {IEEE Press},
  12018         address = {Seattle, Washington},
  12019         abstract = {PlanetP is a peer-to-peer system in which searching content is done mostly locally. Every peer knows which content is available at which other peers. The index information is represented compactly using bloom filters and distributed throughout the network using push and pull mechanisms },
  12020         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.6056\&rep=rep1\&type=url\&i=0},
  12021         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.dvi_.pdf},
  12022         author = {Francisco Matias Cuenca-Acuna and Christopher Peery and Richard P. Martin and Thu D. Nguyen}
  12023 }
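%
% Illustrative sketch (a generic Bloom filter, not PlanetP's code; kept as a .bib comment):
% how a peer could summarize its local term index compactly before gossiping the summary,
% as described in the entry above. The sizes (m, k) and all names are assumptions made for
% the example.
%
%   import hashlib
%
%   class BloomFilter:
%       """Tiny Bloom filter: k hashed positions per item in an m-bit array."""
%       def __init__(self, m=1024, k=4):
%           self.m, self.k, self.bits = m, k, bytearray(m)
%
%       def _positions(self, item: str):
%           for i in range(self.k):
%               digest = hashlib.sha256(f"{i}:{item}".encode()).digest()
%               yield int.from_bytes(digest[:8], "big") % self.m
%
%       def add(self, item: str):
%           for p in self._positions(item):
%               self.bits[p] = 1
%
%       def might_contain(self, item: str) -> bool:
%           # False positives are possible, false negatives are not.
%           return all(self.bits[p] for p in self._positions(item))
%
%   summary = BloomFilter()
%   for term in ["gossip", "bloom", "peer"]:
%       summary.add(term)               # the compact summary is what gets gossiped
%   assert summary.might_contain("gossip")
%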
  12024 @conference {DBLP:conf/ccs/YangG03,
  12025         title = {PPay: micropayments for peer-to-peer systems},
  12026         booktitle = {CCS'03. Proceedings of the 10th ACM Conference on Computer and Communications Security},
  12027         year = {2003},
  12028         month = oct,
  12029         pages = {300--310},
  12030         publisher = {ACM},
  12031         organization = {ACM},
  12032         address = {Washington, DC, USA},
  12033         www_section = {economics, payment},
  12034         isbn = {1-58113-738-9 },
  12035         doi = {http://dx.doi.org/10.1145/948109.948150},
  12036         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2703\%20-\%20Yang\%20\%26\%20Garcia-Molina\%20-\%20PPay.pdf},
  12037         author = {Beverly Yang and Hector Garcia-Molina}
  12038 }
  12039 @conference {RP03-1,
  12040         title = {Practical Anonymity for the Masses with Mix-Networks},
  12041         booktitle = {Proceedings of the IEEE 8th Intl. Workshop on Enterprise Security (WET ICE 2003)},
  12042         year = {2003},
  12043         month = jun,
  12044         publisher = {IEEE Computer Society  Washington, DC, USA},
  12045         organization = {IEEE Computer Society  Washington, DC, USA},
  12046         address = {Linz, Austria},
  12047         abstract = {Designing mix-networks for low-latency applications that offer acceptable performance and provide good resistance against attacks without introducing too much overhead is very difficult. Good performance and small overheads are vital to attract users and to be able to support many of them, because with only a few users, there is no anonymity at all. In this paper, we analyze how well different kinds of mix-networks are suited to provide practical anonymity for a very large number of users},
  12048         www_section = {performance},
  12049         isbn = {0-7695-1963-6},
  12050         url = {http://portal.acm.org/citation.cfm?id=938984.939808},
  12051         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RP03-1.pdf},
  12052         author = {Marc Rennhard and Bernhard Plattner}
  12053 }
  12054 @booklet {pt:03:ldpc,
  12055         title = {On the Practical Use of LDPC Erasure Codes for Distributed Storage Applications},
  12056         number = {CS-03-510},
  12057         year = {2003},
  12058         month = {September},
  12059         publisher = {University of Tennessee},
  12060         abstract = {As peer-to-peer and widely distributed storage systems proliferate, the need to perform efficient erasure coding, instead of replication, is crucial to performance and efficiency. Low-Density Parity-Check (LDPC) codes have arisen as alternatives to standard erasure codes, such as Reed-Solomon codes, trading off vastly improved decoding performance for inefficiencies in the amount of data that must be acquired to perform decoding. The scores of papers written on LDPC codes typically analyze their collective and asymptotic behavior. Unfortunately, their practical application requires the generation and analysis of individual codes for finite systems. This paper attempts to illuminate the practical considerations of LDPC codes for peer-to-peer and distributed storage systems. The three main types of LDPC codes are detailed, and a huge variety of codes are generated, then analyzed using simulation. This analysis focuses on the performance of individual codes for finite systems, and addresses several important heretofore unanswered questions about employing LDPC codes in real-world systems},
  12061         www_section = {distributed hash table, distributed storage, LDPC, P2P},
  12062         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.5709},
  12063         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ut-cs-03-510.pdf},
  12064         author = {James S. Plank and Michael G. Thomason}
  12065 }
  12066 @conference {camenisch2003pve,
  12067         title = {Practical Verifiable Encryption and Decryption of Discrete Logarithms},
  12068         booktitle = {Proceedings of CRYPTO 2003},
  12069         year = {2003},
  12070         pages = {126--144},
  12071         publisher = {Springer Verlag, LNCS 2729},
  12072         organization = {Springer Verlag, LNCS 2729},
  12073         abstract = {This paper addresses the problem of designing practical protocols for proving properties about encrypted data. To this end, it presents a variant of the new public key encryption of Cramer and Shoup based on Paillier's decision composite residuosity assumption, along with efficient protocols for verifiable encryption and decryption of discrete logarithms (and more generally, of representations with respect to multiple bases). This is the first verifiable encryption system that provides chosen ciphertext security and avoids inefficient cut-and-choose proofs. The presented protocols have numerous applications, including key escrow, optimistic fair exchange, publicly verifiable secret and signature sharing, universally composable commitments, group signatures, and confirmer signatures},
  12074         www_section = {public key cryptography},
  12075         isbn = {978-3-540-40674-7},
  12076         doi = {10.1007/b11817},
  12077         url = {http://www.springerlink.com/content/wjbh5579hdfd66ed/},
  12078         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2003pve.pdf},
  12079         author = {Jan Camenisch and Victor Shoup}
  12080 }
  12081 @conference {agrawal03,
  12082         title = {Probabilistic Treatment of MIXes to Hamper Traffic Analysis},
  12083         booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy},
  12084         year = {2003},
  12085         month = may,
  12086         pages = {16--27},
  12087         publisher = {IEEE Computer Society  Washington, DC, USA},
  12088         organization = {IEEE Computer Society  Washington, DC, USA},
  12089         abstract = {The goal of anonymity-providing techniques is to preserve the privacy of users (who has communicated with whom, for how long, and from which location) by hiding traffic information. This is accomplished by organizing additional traffic to conceal particular communication relationships and by embedding the sender and receiver of a message in their respective anonymity sets. If the number of overall participants is greater than the size of the anonymity set and if the anonymity set changes with time due to unsynchronized participants, then the anonymity technique becomes prone to traffic analysis attacks. In this paper, we are interested in the statistical properties of the disclosure attack, a newly suggested traffic analysis attack on the MIXes. Our goal is to provide analytical estimates of the number of observations required by the disclosure attack and to identify fundamental (but avoidable) {\textquoteleft}weak operational modes' of the MIXes and thus to protect users against a traffic analysis by the disclosure attack},
  12090         www_section = {anonymity measurement, mix, traffic analysis},
  12091         isbn = {0-7695-1940-7},
  12092         url = {http://portal.acm.org/citation.cfm?id=829515.830557},
  12093         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/agrawal03.pdf},
  12094         author = {Dakshi Agrawal and Dogan Kesdogan and Stefan Penz}
  12095 }
  12096 @conference {BM:mixencrypt,
  12097         title = {Provably Secure Public-Key Encryption for Length-Preserving Chaumian Mixes},
  12098         booktitle = {Proceedings of CT-RSA 2003},
  12099         year = {2003},
  12100         month = {April},
  12101         publisher = {Springer-Verlag, LNCS 2612},
  12102         organization = {Springer-Verlag, LNCS 2612},
  12103         abstract = {Mix chains as proposed by Chaum allow sending untraceable electronic e-mail without requiring trust in a single authority: messages are recursively public-key encrypted to multiple intermediates (mixes), each of which forwards the message after removing one layer of encryption. To conceal as much information as possible when using variable (source routed) chains, all messages passed to mixes should be of the same length; thus, message length should not decrease when a mix transforms an input message into the corresponding output message directed at the next mix in the chain. Chaum described an implementation
  12104 for such length-preserving mixes, but it is not secure against active attacks. We show how to build practical cryptographically secure length-preserving mixes. The conventional definition of security against chosen ciphertext attacks is not applicable to length-preserving mixes; we give an appropriate definition and show that our construction achieves provable security},
  12105         www_section = {mix chain, public key cryptography},
  12106         url = {http://eprints.kfupm.edu.sa/59837/},
  12107         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BM-mixencrypt.pdf},
  12108         author = {Bodo M{\"o}ller}
  12109 }
  12110 @conference {Serjantov03puzzlesin,
  12111         title = {Puzzles in P2P Systems},
  12112         booktitle = {8th CaberNet Radicals Workshop},
  12113         year = {2003},
  12114         month = oct,
  12115         publisher = {Network of Excellence in Distributed and Dependable Computing Systems},
  12116         organization = {Network of Excellence in Distributed and Dependable Computing Systems},
  12117         address = {Ajaccio, Corsica},
  12118         abstract = {In this paper we consider using client puzzles to provide incentives for users in a peer-to-peer system to behave in a uniform way. The techniques developed can be used to encourage users of a system to share content (combating the free riding problem) or perform {\textquoteleft}community' tasks},
  12119         www_section = {p2p network, puzzle},
  12120         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CaberNet\%20Radicals\%20Workshop\%20-\%20Puzzles\%20in\%20P2P\%20Systems.pdf},
  12121         author = {Andrei Serjantov and Stephen Lewis}
  12122 }
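%
% Illustrative sketch (a common hashcash-style construction; the paper's puzzles may differ;
% kept as a .bib comment): a client puzzle that is cheap to verify but costly to solve --
% find a nonce whose hash has a given number of leading zero bits. Parameter names and the
% difficulty value are assumptions made for the example.
%
%   import hashlib
%   from itertools import count
%
%   def solve_puzzle(challenge: bytes, difficulty_bits: int) -> int:
%       """Brute-force a nonce so sha256(challenge || nonce) starts with zero bits."""
%       for nonce in count():
%           digest = hashlib.sha256(challenge + nonce.to_bytes(8, "big")).digest()
%           if int.from_bytes(digest, "big") >> (256 - difficulty_bits) == 0:
%               return nonce
%
%   def verify_puzzle(challenge: bytes, nonce: int, difficulty_bits: int) -> bool:
%       digest = hashlib.sha256(challenge + nonce.to_bytes(8, "big")).digest()
%       return int.from_bytes(digest, "big") >> (256 - difficulty_bits) == 0
%
%   nonce = solve_puzzle(b"request-42", 12)        # ~2^12 hash attempts on average
%   assert verify_puzzle(b"request-42", nonce, 12)
%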
  12123 @conference {Feldman03quantifyingdisincentives,
  12124         title = {Quantifying Disincentives in Peer-to-Peer Networks},
  12125         booktitle = {Workshop on Economics of Peer-to-Peer Systems},
  12126         year = {2003},
  12127         month = jun,
  12128         address = {Berkeley, CA},
  12129         abstract = {In this paper, we use modeling and simulation to better understand the effects of cooperation on user performance and to quantify the performance-based disincentives in a peer-to-peer file sharing system. This is the first step towards building an incentive system. For the models developed in this paper, we have the following results: Although performance improves significantly when cooperation increases from low to moderate levels, the improvement diminishes thereafter. In particular, the mean delay to download a file when 5\% of the nodes share files is 8x more than when 40\% of the nodes share files, while the mean download delay when 40\% of the nodes share is only 1.75x more than when 100\% share},
  12130         www_section = {incentives, peer-to-peer networking},
  12131         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Feldman\%2C\%20Lai\%2C\%20Chuang\%20\%26\%20Stoica\%20-\%20Quantifying\%20disincentives\%20in\%20peer-to-peer\%20networks.pdf},
  12132         author = {Michal Feldman and Kevin Lai and John Chuang and Ion Stoica}
  12133 }
  12134 @conference {Huebsch:2003:QIP:1315451.1315480,
  12135         title = {Querying the internet with PIER},
  12136         booktitle = {Proceedings of the 29th international conference on Very large data bases--Volume 29},
  12137         series = {VLDB '03},
  12138         year = {2003},
  12139         pages = {321--332},
  12140         publisher = {VLDB Endowment},
  12141         organization = {VLDB Endowment},
  12142         www_section = {distributed hash table, PIER, range queries},
  12143         isbn = {0-12-722442-4},
  12144         url = {http://dl.acm.org/citation.cfm?id=1315451.1315480},
  12145         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/vldb03-pier.pdf},
  12146         author = {Huebsch, Ryan and Hellerstein, Joseph M. and Lanham, Nick and Boon Thau Loo and S Shenker and Ion Stoica}
  12147 }
  12148 @booklet {RatnasamyHellersteinShenker2003RangeQueries,
  12149         title = {Range Queries over DHTs},
  12150         year = {2003},
  12151         abstract = {Distributed Hash Tables (DHTs) are scalable peer-to-peer systems that support exact match lookups. This paper describes the construction and use of a Prefix Hash Tree (PHT) -- a distributed data structure that supports range queries over DHTs. PHTs use the hash-table interface of DHTs to construct a search tree that is efficient (insertions/lookups take O(log D) DHT lookups, where D is the data domain being indexed) and robust (the failure of any given node in the search tree does not affect the availability of data stored at other nodes in the PHT)},
  12152         www_section = {distributed hash table, P2P, queries, range},
  12153         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.6.243},
  12154         author = {Ratnasamy, Sylvia and Hellerstein, Joseph M. and S Shenker}
  12155 }
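%
% Illustrative sketch (a much-simplified prefix-bucket index, not the paper's PHT algorithm;
% kept as a .bib comment): keys are W-bit integers stored in a DHT (modelled as a dict)
% under their L-bit prefix, so a range query only has to fetch the buckets whose prefix can
% contain keys in the range. W, L and all names are assumptions made for the example.
%
%   W = 16          # key width in bits
%   L = 6           # prefix length used as the DHT bucket label
%
%   def prefix(key: int) -> str:
%       return format(key, f"0{W}b")[:L]
%
%   def insert(dht: dict, key: int):
%       dht.setdefault(prefix(key), set()).add(key)     # one DHT put per key
%
%   def range_query(dht: dict, lo: int, hi: int):
%       """Fetch only the buckets whose prefix can contain keys in [lo, hi]."""
%       result = set()
%       for p in range(lo >> (W - L), (hi >> (W - L)) + 1):
%           bucket = dht.get(format(p, f"0{L}b"), set())
%           result |= {k for k in bucket if lo <= k <= hi}
%       return sorted(result)
%
%   dht = {}
%   for k in [3, 700, 701, 40000, 65535]:
%       insert(dht, k)
%   assert range_query(dht, 0, 1000) == [3, 700, 701]
%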
  12156 @conference {GKK03,
  12157         title = {Rapid Mixing and Security of Chaum's Visual Electronic Voting},
  12158         booktitle = {Proceedings of ESORICS 2003},
  12159         year = {2003},
  12160         month = {October},
  12161         publisher = {Springer Berlin / Heidelberg},
  12162         organization = {Springer Berlin / Heidelberg},
  12163         abstract = {Recently, David Chaum proposed an electronic voting scheme that combines visual cryptography and digital processing. It was designed to meet not only mathematical security standards, but also to be accepted by voters that do not trust electronic devices.
  12164 In this scheme, mix-servers are used to guarantee anonymity of the votes in the counting process. The mix-servers are operated by different parties, so evidence of their correct operation is necessary. For this purpose, the protocol uses randomized partial checking of Jakobsson et al., where some randomly selected connections between the (encoded) inputs and outputs of a mix-server are revealed. This leaks some information about the ballots, even if intuitively this information cannot be used for any efficient attack.
  12165 We provide a rigorous stochastic analysis of how much information is revealed by randomized partial checking in Chaum's protocol. We estimate how many mix-servers are necessary for a fair security level. Namely, we consider the probability distribution of the permutations linking the encoded votes with the decoded votes given the information revealed by randomized partial checking. We show that the variation distance between this distribution and the uniform distribution is negligible already for a constant number of mix-servers (n is the number of voters). This means that a constant number of trustees in Chaum's protocol is enough to obtain provable security. The analysis also shows that certain details of Chaum's protocol can be simplified without lowering the security level},
  12166         www_section = {electronic voting, Markov chain, path coupling, randomized partial checking, rapid mixing},
  12167         isbn = {978-3-540-20300-1},
  12168         doi = {10.1007/b13237},
  12169         url = {http://www.springerlink.com/content/5gmj68nn4x1xc4j1/},
  12170         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GKK03.pdf},
  12171         author = {Marcin Gomulkiewicz and Marek Klonowski and Miroslaw Kutylowski}
  12172 }
  12173 @conference { maymounkov:rateless,
  12174         title = {Rateless Codes and Big Downloads},
  12175         booktitle = {IPTPS'03--Proceedings of the 2nd International Workshop on Peer-to-Peer Systems},
  12176         series = {Lecture Notes in Computer Science},
  12177         volume = {2735},
  12178         year = {2003},
  12179         month = feb,
  12180         pages = {247--255},
  12181         publisher = {Springer},
  12182         organization = {Springer},
  12183         address = {Berkeley, CA, USA},
  12184         abstract = {This paper presents a novel algorithm for downloading big files from multiple sources in peer-to-peer networks. The algorithm is simple, but offers several compelling properties. It ensures low hand-shaking overhead between peers that download files (or parts of files) from each other. It is computationally efficient, with cost linear in the amount of data transferred. Most importantly, when nodes leave the network in the middle of uploads, the algorithm minimizes the duplicate information shared by nodes with truncated downloads. Thus, any two peers with partial knowledge of a given file can almost always fully benefit from each other's knowledge. Our algorithm is made possible by the recent introduction of linear-time, rateless erasure codes},
  12185         www_section = {algorithms, big files, download, multiple sources, rateless code},
  12186         doi = {10.1007/978-3-540-45172-3_23},
  12187         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2703\%20-\%20Rateless\%20codes\%20and\%20big\%20downloads.pdf},
  12188         author = {Petar Maymounkov and David Mazi{\`e}res}
  12189 }
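%
% Illustrative sketch (a generic LT-style construction, not the algorithm from the paper;
% kept as a .bib comment): each encoded symbol is the XOR of a random subset of source
% blocks, and a peeling decoder repeatedly resolves symbols whose current degree is one.
% The block values, the fixed demo symbols and all names are assumptions for the example.
%
%   import random
%
%   def encode_symbol(blocks, rng):
%       """One rateless symbol: XOR of a random non-empty subset of the source blocks."""
%       idx = rng.sample(range(len(blocks)), rng.randint(1, len(blocks)))
%       value = 0
%       for i in idx:
%           value ^= blocks[i]
%       return set(idx), value
%
%   def decode(symbols, n):
%       """Peel symbols of current degree one until all n source blocks are known."""
%       known = {}
%       progress = True
%       while progress and len(known) < n:
%           progress = False
%           for idx, val in symbols:
%               unknown = idx - set(known)
%               if len(unknown) == 1:
%                   i = unknown.pop()
%                   for j in idx - {i}:
%                       val ^= known[j]
%                   known[i] = val
%                   progress = True
%       return [known.get(i) for i in range(n)]
%
%   blocks = [0x11, 0x22, 0x33, 0x44]                  # source blocks as small integers
%   symbols = [({0}, 0x11), ({0, 1}, 0x11 ^ 0x22),     # a decodable set, fixed for the demo
%              ({1, 2}, 0x22 ^ 0x33), ({2, 3}, 0x33 ^ 0x44)]
%   rng = random.Random(7)
%   symbols += [encode_symbol(blocks, rng) for _ in range(3)]
%   assert decode(symbols, len(blocks)) == blocks
%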
  12190 @conference {incomparable-pkeys,
  12191         title = {Receiver Anonymity via Incomparable Public Keys},
  12192         booktitle = {Proceedings of the 10th ACM Conference on Computer and Communications Security (CCS 2003)},
  12193         year = {2003},
  12194         month = {October},
  12195         pages = {112--121},
  12196         publisher = {ACM Press},
  12197         organization = {ACM Press},
  12198         abstract = {We describe a new method for protecting the anonymity of message receivers in an untrusted network. Surprisingly, existing methods fail to provide the required level of anonymity for receivers (although those methods do protect sender anonymity). Our method relies on the use of multicast, along with a novel cryptographic primitive that we call an Incomparable Public Key cryptosystem, which allows a receiver to efficiently create many anonymous "identities" for itself without divulging that these separate "identities" actually refer to the same receiver, and without increasing the receiver's workload as the number of identities increases. We describe the details of our method, along with a prototype implementation},
  12199         www_section = {anonymity, PGP, privacy, public key cryptography},
  12200         isbn = {1-58113-738-9},
  12201         doi = {10.1145/948109.948127},
  12202         url = {http://portal.acm.org/citation.cfm?id=948127},
  12203         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incomparable-pkeys.pdf},
  12204         author = {Waters, Brent and Edward W. Felten and Amit Sahai},
  12205         editor = {Vijay Atluri and Peng Liu}
  12206 }
  12207 @conference {rep-anon,
  12208         title = {Reputation in P2P Anonymity Systems},
  12209         booktitle = {Proceedings of Workshop on Economics of Peer-to-Peer Systems},
  12210         year = {2003},
  12211         month = {June},
  12212         abstract = {Decentralized anonymity systems tend to be unreliable, because users must choose nodes in the network without knowing the entire state of the network. Reputation systems promise to improve reliability by predicting network state. In this paper we focus on anonymous remailers and anonymous publishing, explain why the systems can benefit from reputation, and describe our experiences designing reputation systems for them while still ensuring anonymity. We find that in each example we first must redesign the underlying anonymity system to support verifiable transactions},
  12213         www_section = {anonymity, anonymous publishing, remailer, reputation},
  12214         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.4740},
  12215         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rep-anon.pdf},
  12216         author = {Roger Dingledine and Nick Mathewson and Paul Syverson}
  12217 }
  12218 @conference {Dingledine03reputationin,
  12219         title = {Reputation in P2P Anonymity Systems},
  12220         booktitle = {In Workshop on Economics of Peer-to-Peer Systems},
  12221         year = {2003},
  12222         abstract = {Decentralized anonymity systems tend to be unreliable, because users must choose nodes in the network without knowing the entire state of the network. Reputation systems promise to improve reliability by predicting network state. In this paper we focus on anonymous remailers and anonymous publishing, explain why the systems can benefit from reputation, and describe our experiences designing reputation systems for them while still ensuring anonymity. We find that in each example we first must redesign the underlying anonymity system to support verifiable transactions},
  12223         www_section = {anonymity, P2P, redundancy, remailer},
  12224         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.4740},
  12225         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.14.4740.pdf},
  12226         author = {Roger Dingledine and Nick Mathewson and Paul Syverson}
  12227 }
  12228 @conference {Padmanabhan:2003:RPS:951950.952204,
  12229         title = {Resilient Peer-to-Peer Streaming},
  12230         booktitle = {ICNP'03. Proceedings of the 11th IEEE International Conference on Network Protocols},
  12231         series = {ICNP '03},
  12232         year = {2003},
  12233         month = nov,
  12234         pages = {0--16},
  12235         publisher = {IEEE Computer Society},
  12236         organization = {IEEE Computer Society},
  12237         address = {Atlanta, Georgia, USA},
  12238         abstract = {We consider the problem of distributing "live" streaming media content to a potentially large and highly dynamic population of hosts. Peer-to-peer content distribution is attractive in this setting because the bandwidth available to serve content scales with demand. A key challenge, however, is making content distribution robust to peer transience. Our approach to providing robustness is to introduce redundancy, both in network paths and in data. We use multiple, diverse distribution trees to provide redundancy in network paths and multiple description coding (MDC) to provide redundancy in data. We present a simple tree management algorithm that provides the necessary path diversity and describe an adaptation framework for MDC based on scalable receiver feedback. We evaluate these using MDC applied to real video data coupled with real usage traces from a major news site that experienced a large flash crowd for live streaming content. Our results show very significant benefits in using multiple distribution trees and MDC, with a 22 dB improvement in PSNR in some cases},
  12239         www_section = {distribution trees, mdc, media content, multiple description coding, peer-to-peer streaming},
  12240         isbn = {0-7695-2024-3},
  12241         url = {http://dl.acm.org/citation.cfm?id=951950.952204},
  12242         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICNP\%2703\%20-\%20Resilient\%20peer-to-peer\%20streaming.pdf},
  12243         author = {Venkata N. Padmanabhan and Wang, Helen J. and Chou, Philip A.}
  12244 }
  12245 @conference {Fuhrmann03resultson,
  12246         title = {Results on the practical feasibility of programmable network services},
  12247         booktitle = {In 2nd International Workshop on Active Network Technologies and Applications (ANTA)},
  12248         year = {2003},
  12249         abstract = {Active and programmable networks have been subject to intensive and successful research activities during the last couple of years. Many ideas and concepts have been pursued. However, only a few of the prototype implementations developed so far can deal with different applications in a larger-scale setting. Moreover, detailed performance analyses of such prototypes are largely missing today. Therefore, this paper does not present yet another architecture for active and programmable networks. In contrast, it rather focuses on the performance evaluation of the so-called AMnet approach that has already been presented previously [1]. As such, the paper demonstrates that an operational high-performance programmable network system with AAA (authentication, authorization, and accounting) security functionality will in fact be feasible in the near future},
  12250         www_section = {programmable networks},
  12251         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.3074},
  12252         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03performance.pdf},
  12253         author = {Thomas Fuhrmann and Till Harbaum and Panos Kassianidis and Marcus Schoeller and Martina Zitterbart}
  12254 }
  12255 @conference {reusable-channels:wpes2003,
  12256         title = {Reusable Anonymous Return Channels},
  12257         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2003)},
  12258         year = {2003},
  12259         month = oct,
  12260         publisher = {ACM  New York, NY, USA},
  12261         organization = {ACM  New York, NY, USA},
  12262         address = {Washington, DC, USA},
  12263         abstract = {Mix networks are used to deliver messages anonymously to recipients, but do not straightforwardly allow the recipient of an anonymous message to reply to its sender. Yet the ability to reply one or more times, and to further reply to replies, is essential to a complete anonymous conversation. We propose a protocol that allows a sender of anonymous messages to establish a reusable anonymous return channel. This channel enables any recipient of one of these anonymous messages to send back one or more anonymous replies. Recipients who reply to different messages can not test whether two return channels are the same, and therefore can not learn whether they are replying to the same person. Yet the fact that multiple recipients may send multiple replies through the same return channel helps defend against the counting attacks that defeated earlier proposals for return channels. In these attacks, an adversary traces the origin of a message by sending a specific number of replies and observing who collects the same number of messages. Our scheme resists these attacks because the replies sent by an attacker are mixed with other replies submitted by other recipients through the same return channel. Moreover, our protocol straightforwardly allows for replies to replies, etc. Our protocol is based upon a re-encryption mix network, and requires four times the amount of computation and communication of a basic mixnet},
  12264         www_section = {anonymity, privacy, return address},
  12265         isbn = {1-58113-776-1},
  12266         doi = {10.1145/1005140.1005155},
  12267         url = {http://portal.acm.org/citation.cfm?id=1005155},
  12268         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reusable-channels-wpes2003.pdf},
  12269         author = {Philippe Golle and Jakobsson, Markus}
  12270 }
  12271 @conference {2003_8,
  12272         title = {Revealing Information While Preserving Privacy},
  12273         booktitle = {Proceedings of the Twenty-second ACM SIGMOD-SIGACT-SIGART Symposium on Principles of Database Systems},
  12274         year = {2003},
  12275         publisher = {ACM},
  12276         organization = {ACM},
  12277         address = {New York, NY, USA},
  12278         abstract = {We examine the tradeoff between privacy and usability of statistical databases. We model a statistical database by an n-bit string d_1, ..., d_n, with a query being a subset q ⊆ [n] to be answered by summation of values which belong to q. Our main result is a polynomial reconstruction algorithm of data from noisy (perturbed) subset sums. Applying this reconstruction algorithm to statistical databases we show that in order to achieve privacy one has to add perturbation of magnitude Ω(√n). That is, smaller perturbation always results in a strong violation of privacy. We show that this result is tight by exemplifying access algorithms for statistical databases that preserve privacy while adding perturbation of magnitude O(√n). For time-T bounded adversaries we demonstrate a privacy-preserving access algorithm whose perturbation magnitude is ≈ √T},
  12279         www_section = {data reconstruction, integrity and security, subset-sums with noise},
  12280         isbn = {1-58113-670-6},
  12281         doi = {10.1145/773153.773173},
  12282         url = {http://doi.acm.org/10.1145/773153.773173},
  12283         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RevelaingInformation2003Dinur.pdf},
  12284         author = {Dinur, Irit and Nissim, Kobbi}
  12285 }
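% The lower bound above says that perturbation of magnitude o(√n) cannot protect the data. The Python toy
% below (a brute-force illustration for tiny n, not the paper's polynomial-time reconstruction algorithm)
% enumerates all candidate databases and keeps those consistent with every noisy subset-sum answer; the
% survivors agree with the true database on almost all bits.
%
% import itertools, random
%
% n, E, rounds = 12, 1, 40          # tiny database, noise bound E, number of queries
% d = [random.randint(0, 1) for _ in range(n)]
%
% queries = [set(random.sample(range(n), random.randint(1, n))) for _ in range(rounds)]
% answers = [sum(d[i] for i in q) + random.randint(-E, E) for q in queries]
%
% def consistent(c):
%     return all(abs(sum(c[i] for i in q) - a) <= E for q, a in zip(queries, answers))
%
% candidates = [c for c in itertools.product((0, 1), repeat=n) if consistent(c)]
% worst = max(sum(ci != di for ci, di in zip(c, d)) for c in candidates)
% print(f"{len(candidates)} consistent candidates, worst Hamming distance from d: {worst}")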
  12286 @conference {Lpcox03samsara:honor,
  12287         title = {Samsara: Honor Among Thieves in Peer-to-Peer Storage},
  12288         booktitle = {SOSP'03--Proceedings of the Nineteenth ACM Symposium on Operating Systems Principles},
  12289         year = {2003},
  12290         month = oct,
  12291         pages = {120--132},
  12292         publisher = {ACM Press},
  12293         organization = {ACM Press},
  12294         address = {Bolton Landing, NY, USA},
  12295         abstract = {Peer-to-peer storage systems assume that their users consume resources in proportion to their contribution. Unfortunately, users are unlikely to do this without some enforcement mechanism. Prior solutions to this problem require centralized infrastructure, constraints on data placement, or ongoing administrative costs. All of these run counter to the design philosophy of peer-to-peer systems. Samsara enforces fairness in peer-to-peer storage systems without requiring trusted third parties, symmetric storage relationships, monetary payment, or certified identities. Each peer that requests storage of another must agree to hold a claim in return---a placeholder that accounts for available space. After an exchange, each partner checks the other to ensure faithfulness. Samsara punishes unresponsive nodes probabilistically. Because objects are replicated, nodes with transient failures are unlikely to suffer data loss, unlike those that are dishonest or chronically unavailable. Claim storage overhead can be reduced when necessary by forwarding among chains of nodes, and eliminated when cycles are created. Forwarding chains increase the risk of exposure to failure, but such risk is modest under reasonable assumptions of utilization and simultaneous, persistent failure},
  12296         www_section = {P2P, reputation},
  12297         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.6734},
  12298         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p135-cox.pdf},
  12299         author = {Landon P. Cox and Brian D. Noble}
  12300 }
  12301 @conference {2003_9,
  12302         title = {Scalable Application-level Anycast for Highly Dynamic Groups},
  12303         booktitle = {NGC'03 Networked Group Communication, Fifth International COST264 Workshop},
  12304         series = {Lecture Notes in Computer Science, 2003},
  12305         volume = {2816},
  12306         year = {2003},
  12307         month = sep,
  12308         pages = {47--57},
  12309         publisher = {Springer},
  12310         organization = {Springer},
  12311         address = {Munich, Germany},
  12312         abstract = {We present an application-level implementation of anycast for highly dynamic groups. The implementation can handle group sizes varying from one to the whole Internet, and membership maintenance is efficient enough to allow members to join for the purpose of receiving a single message. Key to this efficiency is the use of a proximity-aware peer-to-peer overlay network for decentralized, lightweight group maintenance; nodes join the overlay once and can join and leave many groups many times to amortize the cost of maintaining the overlay. An anycast implementation with these properties provides a key building block for distributed applications. In particular, it enables management and location of dynamic resources in large scale peer-to-peer systems. We present several resource management applications that are enabled by our implementation},
  12313         www_section = {anycast, application-level, highly dynamic groups, peer-to-peer networking},
  12314         doi = {10.1007/978-3-540-39405-1_5},
  12315         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NGC\%2703\%20-\%20Scalable\%20Application-level\%20Anycast\%20.pdf},
  12316         author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron}
  12317 }
  12318 @article {776703,
  12319         title = {Security Performance},
  12320         journal = {IEEE Internet Computing},
  12321         volume = {7},
  12322         number = {3},
  12323         year = {2003},
  12324         pages = {84--87},
  12325         publisher = {IEEE Educational Activities Department},
  12326         address = {Piscataway, NJ, USA},
  12327         abstract = {Several protocols and mechanisms aim to enforce the various dimensions of security in applications ranging from email to e-commerce transactions. Adding such mechanisms and procedures to applications and systems does not come cheaply, however, as they impose security trade-offs in the areas of performance and scalability},
  12328         www_section = {security policy, trade-off},
  12329         issn = {1089-7801},
  12330         doi = {10.1109/MIC.2003.1200305},
  12331         url = {http://portal.acm.org/citation.cfm?id=776703$\#$},
  12332         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE-IC-SecurityPerformance-May-2003.pdf},
  12333         author = {Menasc{\'e}, Daniel}
  12334 }
  12335 @conference {Qiu:2003:SRI:863955.863974,
  12336         title = {On selfish routing in internet-like environments},
  12337         booktitle = {SIGCOMM'03. Proceedings of the 2003 conference on Applications, technologies, architectures, and protocols for computer communications},
  12338         series = {SIGCOMM '03},
  12339         year = {2003},
  12340         month = aug,
  12341         pages = {151--162},
  12342         publisher = {ACM},
  12343         organization = {ACM},
  12344         address = {Karlsruhe, Germany},
  12345         abstract = {A recent trend in routing research is to avoid inefficiencies in network-level routing by allowing hosts to either choose routes themselves (e.g., source routing) or use overlay routing networks (e.g., Detour or RON). Such approaches result in selfish routing, because routing decisions are no longer based on system-wide criteria but are instead designed to optimize host-based or overlay-based metrics. A series of theoretical results showing that selfish routing can result in suboptimal system behavior have cast doubts on this approach. In this paper, we use a game-theoretic approach to investigate the performance of selfish routing in Internet-like environments. We focus on intra-domain network environments and use realistic topologies and traffic demands in our simulations. We show that in contrast to theoretical worst cases, selfish routing achieves close to optimal average latency in such environments. However, such performance benefit comes at the expense of significantly increased congestion on certain links. Moreover, the adaptive nature of selfish overlays can significantly reduce the effectiveness of traffic engineering by making network traffic less predictable},
  12346         www_section = {game theory, optimization, overlay, relaxation, selfish routing, traffic engineering, traffic equilibrium},
  12347         isbn = {1-58113-735-4},
  12348         doi = {http://doi.acm.org/10.1145/863955.863974},
  12349         url = {http://doi.acm.org/10.1145/863955.863974},
  12350         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2703\%20-\%20On\%20selfish\%20routing\%20in\%20internet-like\%20environments.pdf},
  12351         author = {Lili Qiu and Yang, Yang Richard and Zhang, Yin and S Shenker}
  12352 }
  12353 @article {766661,
  12354         title = {Self-Organized Public-Key Management for Mobile Ad Hoc Networks},
  12355         author = {Capkun, Srdjan and Butty{\'a}n, Levente and Hubaux, J-P},
  12356         journal = {IEEE Transactions on Mobile Computing},
  12357         volume = {2},
  12358         number = {1},
  12359         year = {2003},
  12360         pages = {52--64},
  12361         publisher = {IEEE Educational Activities Department},
  12362         address = {Piscataway, NJ, USA},
  12363         abstract = {In contrast with conventional networks, mobile ad hoc networks usually do not provide online access to trusted authorities or to centralized servers, and they exhibit frequent partitioning due to link and node failures and to node mobility. For these reasons, traditional security solutions that require online trusted authorities or certificate repositories are not well-suited for securing ad hoc networks. In this paper, we propose a fully self-organized public-key management system that allows users to generate their public-private key pairs, to issue certificates, and to perform authentication regardless of the network partitions and without any centralized services. Furthermore, our approach does not require any trusted authority, not even in the system initialization phase},
  12364         www_section = {ad-hoc networks, key authentication, PGP, public key cryptography, self-organization},
  12365         issn = {1536-1233},
  12366         doi = {10.1109/TMC.2003.1195151},
  12367         url = {http://portal.acm.org/citation.cfm?id=766655.766661$\#$},
  12368         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.1545.pdf}
  12369 }
  12370 @conference {Naor03asimple,
  12371         title = {A Simple Fault Tolerant Distributed Hash Table},
  12372         booktitle = {IPTPS'03. Proceedings of the Second International Workshop on Peer-to-Peer Systems},
  12373         year = {2003},
  12374         pages = {88--97},
  12375         abstract = {We introduce a distributed hash table (DHT) with logarithmic degree and logarithmic dilation. We show two lookup algorithms. The first has a message complexity of   and is robust under random deletion of nodes. The second has parallel time of   and message complexity of   . It is robust under spam induced by a random subset of the nodes. We then show a construction which is fault tolerant against random deletions and has an optimal degree-dilation tradeoff. The construction has improved parameters when compared to other DHTs. Its main merits are its simplicity, its flexibility and the fresh ideas introduced in its design. It is very easy to modify and to add more sophisticated protocols, such as dynamic caching and erasure correcting codes},
  12376         www_section = {distributed hash table, fault-tolerance},
  12377         doi = {10.1007/b11823},
  12378         url = {http://www.springerlink.com/content/4e756fgyq4ff4kay/},
  12379         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3388.pdf},
  12380         author = {Moni Naor and Udi Wieder}
  12381 }
  12382 @conference {Harvey:2003:SSO:1251460.1251469,
  12383         title = {SkipNet: a scalable overlay network with practical locality properties},
  12384         booktitle = {Proceedings of the 4th conference on USENIX Symposium on Internet Technologies and Systems--Volume 4},
  12385         series = {USITS'03},
  12386         year = {2003},
  12387         pages = {9--9},
  12388         publisher = {USENIX Association},
  12389         organization = {USENIX Association},
  12390         address = {Berkeley, CA, USA},
  12391         www_section = {distributed hash table, range queries, SkipNet},
  12392         url = {http://dl.acm.org/citation.cfm?id=1251460.1251469},
  12393         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/harvey.pdf},
  12394         author = {Harvey, Nicholas J. A. and Michael B. Jones and Stefan Saroiu and Marvin Theimer and Wolman, Alec}
  12395 }
  12396 @booklet {Freedman03sloppyhashing,
  12397         title = {Sloppy Hashing and Self-Organizing Clusters},
  12398         journal = {IPTPS'03. Proceedings of the Second International Workshop on Peer-to-Peer Systems},
  12399         volume = {2735},
  12400         year = {2003},
  12401         pages = {45--55},
  12402         publisher = {Springer Berlin / Heidelberg},
  12403         abstract = {We are building Coral, a peer-to-peer content distribution system. Coral creates self-organizing clusters of nodes that fetch information from each other to avoid communicating with more distant or heavily-loaded servers. Coral indexes data, but does not store it. The actual content resides where it is used, such as in nodes' local web caches. Thus, replication happens exactly in proportion to demand},
  12404         isbn = {978-3-540-40724-9},
  12405         url = {http://www.coralcdn.org/docs/coral-iptps03.ps},
  12406         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coral-iptps03.pdf},
  12407         author = {Michael J. Freedman and David Mazi{\`e}res}
  12408 }
  12409 @conference {Klemm03aspecial-purpose,
  12410         title = {A Special-Purpose Peer-to-Peer File Sharing System for Mobile Ad Hoc Networks},
  12411         booktitle = {Proceedings of the IEEE Vehicular Technology Conference (VTC2003-Fall)},
  12412         year = {2003},
  12413         abstract = {Establishing peer-to-peer (P2P) file sharing for mobile ad hoc networks (MANET) requires the construction of a search algorithm for transmitting queries and search results as well as the development of a transfer protocol for downloading files matching a query. In this paper, we present a special-purpose system for searching and file transfer tailored to both the characteristics of MANET and the requirements of peer-to-peer file sharing. Our approach is based on an application layer overlay network. As an innovative feature, overlay routes are set up on demand by the search algorithm, closely matching network topology and transparently aggregating redundant transfer paths on a per-file basis. The transfer protocol guarantees high data rates and low transmission overhead by utilizing overlay routes. In a detailed ns2 simulation study, we show that both the search algorithm and the transfer protocol outperform off-the-shelf approaches based on a P2P file sharing system for the wireline Internet, TCP and a MANET routing protocol},
  12414         www_section = {ad-hoc networks, file-sharing, P2P},
  12415         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.9634},
  12416         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/VTC03.pdf},
  12417         author = {Alexander Klemm and Christoph Lindemann and Oliver Waldhorst}
  12418 }
  12419 @article {Castro:2003:SHM:1165389.945474,
  12420         title = {SplitStream: high-bandwidth multicast in cooperative environments},
  12421         journal = {SIGOPS'03 Operating Systems Review},
  12422         volume = {37},
  12423         year = {2003},
  12424         month = oct,
  12425         pages = {298--313},
  12426         publisher = {ACM},
  12427         address = {New York, NY, USA},
  12428         abstract = {In tree-based multicast systems, a relatively small number of interior nodes carry the load of forwarding multicast messages. This works well when the interior nodes are highly-available, dedicated infrastructure routers but it poses a problem for application-level multicast in peer-to-peer systems. SplitStream addresses this problem by striping the content across a forest of interior-node-disjoint multicast trees that distributes the forwarding load among all participating peers. For example, it is possible to construct efficient SplitStream forests in which each peer contributes only as much forwarding bandwidth as it receives. Furthermore, with appropriate content encodings, SplitStream is highly robust to failures because a node failure causes the loss of a single stripe on average. We present the design and implementation of SplitStream and show experimental results obtained on an Internet testbed and via large-scale network simulation. The results show that SplitStream distributes the forwarding load among all peers and can accommodate peers with different bandwidth capacities while imposing low overhead for forest construction and maintenance},
  12429         www_section = {application-level multicast, content distribution, end-system multicast, peer-to-peer networking, video streaming},
  12430         issn = {0163-5980},
  12431         doi = {http://doi.acm.org/10.1145/1165389.945474},
  12432         url = {http://doi.acm.org/10.1145/1165389.945474},
  12433         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOSP\%2703\%20-\%20Spitstream\%3A\%20High-bandwidth\%20multicast.pdf},
  12434         author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Nandi, Animesh and Antony Rowstron and Singh, Atul}
  12435 }
  12436 @conference {statistical-disclosure,
  12437         title = {Statistical Disclosure Attacks: Traffic Confirmation in Open Environments},
  12438         booktitle = {Proceedings of Security and Privacy in the Age of Uncertainty, (SEC2003)},
  12439         year = {2003},
  12440         month = {May},
  12441         pages = {421--426},
  12442         publisher = {IFIP TC11},
  12443         organization = {IFIP TC11},
  12444         address = {Athens},
  12445         abstract = {An improvement over the previously known disclosure attack is presented that allows an attacker, using statistical methods, to effectively deanonymize users of a mix system. Furthermore, the statistical disclosure attack is computationally efficient, and the conditions for it to be possible and accurate are much better understood. The new attack can be generalized easily to a variety of anonymity systems beyond mix networks},
  12446         www_section = {anonymity, statistical analysis, traffic analysis},
  12447         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.4512},
  12448         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/statistical-disclosure.pdf},
  12449         author = {George Danezis}
  12450 }
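% A Python toy of the intuition behind the statistical disclosure attack (simplified assumptions: a threshold
% mix with batch size b, uniform background traffic, and a target with a small fixed recipient set; not
% Danezis's exact estimator): average the recipients observed in rounds where the target sends and flag those
% whose counts clearly exceed the expected background.
%
% import random
% from collections import Counter
%
% N, b, rounds = 100, 10, 2000             # recipients, mix batch size, observed rounds
% alice = set(random.sample(range(N), 3))  # target's true recipients (unknown to the attacker)
%
% totals = Counter()
% for _ in range(rounds):
%     batch = [random.choice(list(alice))]                  # one message from the target
%     batch += [random.randrange(N) for _ in range(b - 1)]  # b-1 background messages
%     totals.update(batch)                                  # attacker observes receivers only
%
% # Expected background count per recipient is rounds*(b-1)/N; recipients far above it are flagged.
% background = rounds * (b - 1) / N
% estimate = {r for r, c in totals.items() if c > 2 * background}
% print("estimated:", sorted(estimate), "true:", sorted(alice))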
  12451 @article {942421,
  12452         title = {Stimulating cooperation in self-organizing mobile ad hoc networks},
  12453         journal = {Mobile Networks and Applications},
  12454         volume = {8},
  12455         number = {5},
  12456         year = {2003},
  12457         pages = {579--592},
  12458         publisher = {Kluwer Academic Publishers},
  12459         address = {Hingham, MA, USA},
  12460         abstract = {In military and rescue applications of mobile ad hoc networks, all the nodes belong to the same authority; therefore, they are motivated to cooperate in order to support the basic functions of the network. In this paper, we consider the case when each node is its own authority and tries to maximize the benefits it gets from the network. More precisely, we assume that the nodes are not willing to forward packets for the benefit of other nodes. This problem may arise in civilian applications of mobile ad hoc networks. In order to stimulate the nodes for packet forwarding, we propose a simple mechanism based on a counter in each node. We study the behavior of the proposed mechanism analytically and by means of simulations, and detail the way in which it could be protected against misuse },
  12461         www_section = {ad-hoc networks, cooperation, self-organization},
  12462         issn = {1383-469X},
  12463         doi = {10.1023/A:1025146013151 },
  12464         url = {http://portal.acm.org/citation.cfm?id=942421$\#$},
  12465         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ButtyanH03monet.pdf},
  12466         author = {Levente Butty{\'a}n and Jean-Pierre Hubaux}
  12467 }
  12468 @booklet {_onthe,
  12469         title = {On the Strategic Importance of Programmable Middleboxes},
  12470         year = {2003},
  12471         abstract = {Network protocols suffer from a lock-in dictated by the need for standardization and Metcalfe's law. Programmable middleboxes can help to relieve the effects of that lock-in. This paper gives game theoretic arguments that show how the option of having middleboxes can raise the quality of communication protocols. Based on this analysis, design considerations for active and programmable networks are discussed},
  12472         www_section = {programmable networks},
  12473         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.7171},
  12474         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03strategy.pdf},
  12475         author = {Thomas Fuhrmann}
  12476 }
  12477 @conference {792432,
  12478         title = {Supporting Peer-to-Peer Computing with FlexiNet},
  12479         booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster Computing and the Grid},
  12480         year = {2003},
  12481         pages = {0--390},
  12482         publisher = {IEEE Computer Society},
  12483         organization = {IEEE Computer Society},
  12484         address = {Washington, DC, USA},
  12485         abstract = {Formation of suitable overlay-network topologies that are able to reflect the structure of the underlying network infrastructure has rarely been addressed by peer-to-peer applications so far. Often, peer-to-peer protocols restrict themselves to purely random formation of their overlay-network. This leads to a far from optimal performance of such peer-to-peer networks and ruthlessly wastes network resources. In this paper, we describe a simple mechanism that uses programmable network technologies to improve the topology formation process of unstructured peer-to-peer networks. Being a network service, our mechanism does not require any modification of existing applications or computing systems. By that, it assists network operators with improving the performance of their network and relieves programmers from the burden of designing and implementing topology-aware peer-to-peer protocols. Although we use the well-known Gnutella protocol to describe the mechanism of our proposed service, it applies to all kinds of unstructured global peer-to-peer computing applications},
  12486         www_section = {overlay networks, programmable networks, topology matching},
  12487         isbn = {0-7695-1919-9},
  12488         url = {http://portal.acm.org/citation.cfm?id=791231.792432$\#$},
  12489         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03supportingP2P.pdf},
  12490         author = {Thomas Fuhrmann}
  12491 }
  12492 @conference {1251470,
  12493         title = {Symphony: distributed hashing in a small world},
  12494         booktitle = {USITS'03: Proceedings of the 4th conference on USENIX Symposium on Internet Technologies and Systems},
  12495         year = {2003},
  12496         pages = {10--10},
  12497         publisher = {USENIX Association},
  12498         organization = {USENIX Association},
  12499         address = {Berkeley, CA, USA},
  12500         abstract = {We present Symphony, a novel protocol for maintaining distributed hash tables in a wide area network. The key idea is to arrange all participants along a ring and equip them with long distance contacts drawn from a family of harmonic distributions. Through simulation, we demonstrate that our construction is scalable, flexible, stable in the presence of frequent updates and offers small average latency with only a handful of long distance links per node. The cost of updates when hosts join and leave is small},
  12501         www_section = {small-world},
  12502         url = {http://portal.acm.org/citation.cfm?id=1251460.1251470$\#$},
  12503         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/manku03symphony.pdf},
  12504         author = {Manku, Gurmeet Singh and Bawa, Mayank and Prabhakar Raghavan}
  12505 }
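% The "long distance contacts drawn from a family of harmonic distributions" mentioned above can be made
% concrete with a small Python sketch (only the sampling step, not a full Symphony implementation; n is the
% node's estimate of the network size): a link distance x in [1/n, 1] is drawn with density proportional to
% 1/(x ln n), which by inverse-transform sampling becomes x = n^(u-1) for uniform u.
%
% import random
%
% def harmonic_link_distance(n):
%     """Sample a ring distance x in [1/n, 1] with density proportional to 1/(x * ln n)."""
%     u = random.random()
%     return n ** (u - 1)
%
% n, k = 2 ** 16, 4                      # estimated network size, long links per node
% my_position = random.random()          # node's position on the unit ring
% long_links = [(my_position + harmonic_link_distance(n)) % 1.0 for _ in range(k)]
% print(sorted(long_links))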
  12506 @conference {958494,
  12507         title = {Taming the underlying challenges of reliable multihop routing in sensor networks},
  12508         booktitle = {SenSys '03: Proceedings of the 1st international conference on Embedded networked sensor systems},
  12509         year = {2003},
  12510         pages = {14--27},
  12511         publisher = {ACM},
  12512         organization = {ACM},
  12513         address = {New York, NY, USA},
  12514         abstract = {The dynamic and lossy nature of wireless communication poses major challenges to reliable, self-organizing multihop networks. These non-ideal characteristics are more problematic with the primitive, low-power radio transceivers found in sensor networks, and raise new issues that routing protocols must address. Link connectivity statistics should be captured dynamically through an efficient yet adaptive link estimator and routing decisions should exploit such connectivity statistics to achieve reliability. Link status and routing information must be maintained in a neighborhood table with constant space regardless of cell density. We study and evaluate link estimator, neighborhood table management, and reliable routing protocol techniques. We focus on a many-to-one, periodic data collection workload. We narrow the design space through evaluations on large-scale, high-level simulations to 50-node, in-depth empirical experiments. The most effective solution uses a simple time averaged EWMA estimator, frequency based table management, and cost-based routing},
  12515         www_section = {link estimation, multi-hop networks, neighborhood management, reliability, sensor networks},
  12516         isbn = {1-58113-707-9},
  12517         doi = {10.1145/958491.958494},
  12518         url = {http://portal.acm.org/citation.cfm?id=958494$\#$},
  12519         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p14-woo.pdf},
  12520         author = {Woo, Alec and Tong, Terence and Culler, David}
  12521 }
  12522 @conference {feamster:pet2003,
  12523         title = {Thwarting Web Censorship with Untrusted Messenger Delivery},
  12524         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  12525         year = {2003},
  12526         month = mar,
  12527         pages = {125--140},
  12528         publisher = {Springer-Verlag, LNCS 2760},
  12529         organization = {Springer-Verlag, LNCS 2760},
  12530         abstract = {All existing anti-censorship systems for the Web rely on proxies to grant clients access to censored information. Therefore, they face the proxy discovery problem: how can clients discover the proxies without having the censor discover and block these proxies? To avoid widespread discovery and blocking, proxies must not be widely published and should be discovered in-band. In this paper, we present a proxy discovery mechanism called keyspace hopping that meets this goal. Similar in spirit to frequency hopping in wireless networks, keyspace hopping ensures that each client discovers only a small fraction of the total number of proxies. However, requiring clients to independently discover proxies from a large set makes it practically impossible to verify the trustworthiness of every proxy and creates the possibility of having untrusted proxies. To address
  12531 this, we propose separating the proxy into two distinct components: the messenger, which the client discovers using keyspace hopping and which simply acts as a gateway to the Internet; and the portal, whose identity is widely-published and whose responsibility it is to interpret and serve the client's requests for censored content. We show how this separation, as well as in-band proxy discovery, can be applied to a variety of anti-censorship systems},
  12532         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/feamster-pet2003.pdf},
  12533         author = {Nick Feamster and Magdalena Balazinska and Winston Wang and Hari Balakrishnan and David Karger},
  12534         editor = {Roger Dingledine}
  12535 }
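% A Python sketch of the keyspace-hopping intuition described above (a simplified illustration, not the
% paper's actual mechanism; the proxy names and parameters are made up): in a given time epoch, a client can
% derive only a small, client-specific subset of proxy identities, so no single client, including one run by
% the censor, can enumerate the whole proxy pool.
%
% import hashlib
%
% PROXIES = [f"proxy-{i}.example.net" for i in range(1000)]   # hypothetical proxy pool
% SUBSET_SIZE = 3
%
% def discoverable_proxies(client_id: str, epoch: int):
%     """Proxies this client may learn about during this epoch."""
%     picks = []
%     for j in range(SUBSET_SIZE):
%         digest = hashlib.sha256(f"{client_id}|{epoch}|{j}".encode()).digest()
%         picks.append(PROXIES[int.from_bytes(digest, "big") % len(PROXIES)])
%     return picks
%
% # Two different clients (e.g. a real user and a censor's probe) see different, small slices of the pool.
% print(discoverable_proxies("alice", epoch=42))
% print(discoverable_proxies("censor-probe", epoch=42))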
  12536 @article {2003_10,
  12537         title = {On the Topology of Overlay-Networks},
  12538         year = {2003},
  12539         abstract = {Random-graph models are about to become an important tool in the study of wireless ad-hoc and sensor-networks, peer-to-peer networks, and, generally, overlay-networks. Such models provide a theoretical basis to assess the capabilities of certain networks, and guide the design of new protocols. Especially the recently proposed models for so-called small-world networks receive much attention from the networking community. This paper proposes the use of two more mathematical concepts for the analysis of network topologies, dimension and curvature. These concepts can intuitively be applied to, e.g., sensor-networks. But they can also be sensibly defined for certain other random-graph models. The latter is non-trivial since such models may describe purely virtual networks that do not inherit properties from an underlying physical world. Analysis of a random-graph model for Gnutella-like overlay-networks yields strong indications that such networks might be characterized as a sphere with fractal dimension},
  12540         journal = {unknown},
  12541         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03topology.pdf},
  12542         author = {Thomas Fuhrmann}
  12543 }
  12544 @conference {DBLP:conf/iptps/DabekZDKS03,
  12545         title = {Towards a Common API for Structured Peer-to-Peer Overlays},
  12546         booktitle = {IPTPS'03. Proceedings of the Second International Workshop on Peer-to-Peer Systems},
  12547         series = {Lecture Notes in Computer Science},
  12548         volume = {2735},
  12549         year = {2003},
  12550         month = feb,
  12551         pages = {33--44},
  12552         publisher = {Springer},
  12553         organization = {Springer},
  12554         address = {Berkeley, CA, USA},
  12555         abstract = {In this paper, we describe an ongoing effort to define common APIs for structured peer-to-peer overlays and the key abstractions that can be built on them. In doing so, we hope to facilitate independent innovation in overlay protocols, services, and applications, to allow direct experimental comparisons, and to encourage application development by third parties. We provide a snapshot of our efforts and discuss open problems in an effort to solicit feedback from the research community},
  12556         www_section = {API, key abstraction},
  12557         isbn = {3-540-40724-3},
  12558         doi = {http://dx.doi.org/10.1007/978-3-540-45172-3_3},
  12559         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2703\%20-\%20Towards\%20a\%20common\%20API.pdf},
  12560         author = {Dabek, Frank and Ben Y. Zhao and Peter Druschel and John Kubiatowicz and Ion Stoica}
  12561 }
  12562 @conference {gnunettransport,
  12563         title = {A Transport Layer Abstraction for Peer-to-Peer Networks},
  12564         booktitle = {Proceedings of the 3rd International Symposium on Cluster Computing and the Grid (GRID 2003)},
  12565         year = {2003},
  12566         pages = {398--403},
  12567         publisher = {IEEE Computer Society},
  12568         organization = {IEEE Computer Society},
  12569         abstract = {The initially unrestricted host-to-host communication model provided by the Internet Protocol has deteriorated due to political and technical changes caused by Internet growth. While this is not a problem for most client-server applications, peer-to-peer networks frequently struggle with peers that are only partially reachable. We describe how a peer-to-peer framework can hide diversity and obstacles in the underlying Internet and provide peer-to-peer applications with abstractions that hide transport specific details. We present the details of an implementation of a transport service based on SMTP. Small-scale benchmarks are used to compare transport services over UDP, TCP, and SMTP},
  12570         www_section = {GNUnet, P2P},
  12571         url = {http://grothoff.org/christian/transport.pdf},
  12572         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/transport.pdf},
  12573         author = {Ronaldo A. Ferreira and Christian Grothoff and Paul Ruth}
  12574 }
  12575 @conference {642636,
  12576         title = {Usability and privacy: a study of Kazaa P2P file-sharing},
  12577         booktitle = {CHI '03: Proceedings of the SIGCHI conference on Human factors in computing systems},
  12578         year = {2003},
  12579         pages = {137--144},
  12580         publisher = {ACM},
  12581         organization = {ACM},
  12582         address = {New York, NY, USA},
  12583         abstract = {P2P file sharing systems such as Gnutella, Freenet, and KaZaA, while primarily intended for sharing multimedia files, frequently allow other types of information to be shared. This raises serious concerns about the extent to which users may unknowingly be sharing private or personal information. In this paper, we report on a cognitive walkthrough and a laboratory user study of the KaZaA file sharing user interface. The majority of the users in our study were unable to tell what files they were sharing, and sometimes incorrectly assumed they were not sharing any files when in fact they were sharing all files on their hard drive. An analysis of the KaZaA network suggested that a large number of users appeared to be unwittingly sharing personal and private files, and that some users were indeed taking advantage of this and downloading files containing ostensibly private information},
  12584         www_section = {file-sharing, P2P},
  12585         isbn = {1-58113-630-7},
  12586         doi = {10.1145/642611.642636},
  12587         url = {http://portal.acm.org/citation.cfm?id=642611.642636$\#$},
  12588         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPL-2002-163.pdf},
  12589         author = {Good, Nathaniel S. and Krekelberg, Aaron}
  12590 }
  12591 @booklet {Fuhrmann_usingbluetooth,
  12592         title = {Using Bluetooth for Informationally Enhanced Environments},
  12593         year = {2003},
  12594         abstract = {The continued miniaturization in computing and wireless communication is about to make informationally enhanced environments become a reality. Already today, devices like a notebook computer or a personal digital assistant (PDA) can easily connect to the Internet via IEEE 802.11 networks (WaveLAN) or similar technologies provided at so-called hot-spots. In the near future, even smaller devices can join a wireless network to exchange status information or send and receive commands. In this paper, we present sample uses of a generic Bluetooth component that we have developed and that has been successfully integrated into various miniature devices to transmit sensor data or exchange control commands. The use of standard protocols like TCP/IP, Obex, and HTTP simplifies the use of those devices with conventional devices (notebook, PDA, cell-phone) without even requiring special drivers or applications for these devices. While such scenarios have already often been dreamt of, we are able to present a working solution based on small and cost-effective standard elements. We describe two applications that illustrate the power of this approach in the broad area of e-commerce, e-learning, and e-government: the BlueWand, a small, pen-like device that can control Bluetooth devices in its vicinity by simple gestures, and a door plate that can display messages that are posted to it e.g. by a Bluetooth PDA. Keywords: Human-Computer Interaction, Ubiquitous Computing, Wireless Communications (Bluetooth)},
  12595         www_section = {Bluetooth, ubiquitous computing},
  12596         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.73.2131},
  12597         author = {Thomas Fuhrmann and Till Harbaum}
  12598 }
  12599 @article {shsm03,
  12600         title = {Using Caching for Browsing Anonymity},
  12601         journal = {ACM SIGEcom Exchanges},
  12602         volume = {4},
  12603         number = {2},
  12604         year = {2003},
  12605         month = {September},
  12606         pages = {11--20  },
  12607         abstract = {Privacy-providing tools, including tools that provide anonymity, are gaining popularity in the modern world. Among the goals of their users is avoiding tracking and profiling. While some businesses are unhappy with the growth of privacy-enhancing technologies, others can use lack of information about their users to avoid unnecessary liability and even possible harassment by parties with contrary business interests, and to gain a competitive market edge. Currently, users interested in anonymous browsing have the choice only between single-hop proxies and the few more complex systems that are available. These still leave the user vulnerable to long-term intersection attacks. In this paper, we propose a caching proxy system for allowing users to retrieve data from the World-Wide Web in a way that would provide recipient unobservability by a third party and sender unobservability by the recipient and thus dispense with intersection attacks, and report on the prototype we built using Google},
  12608         www_section = {anonymity, caching proxies, privacy},
  12609         doi = {10.1145/1120709.1120713},
  12610         url = {http://portal.acm.org/citation.cfm?id=1120713},
  12611         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shsm03.pdf},
  12612         author = {Anna Shubina and Sean Smith}
  12613 }
  12614 @article {939859,
  12615         title = {Wireless Community Networks},
  12616         journal = {Computer},
  12617         volume = {36},
  12618         number = {8},
  12619         year = {2003},
  12620         pages = {90--92},
  12621         publisher = {IEEE Computer Society Press},
  12622         address = {Los Alamitos, CA, USA},
  12623         issn = {0018-9162},
  12624         doi = {10.1109/MC.2003.1220588},
  12625         url = {http://portal.acm.org/citation.cfm?id=939824.939859$\#$},
  12626         author = {Jain, Saurabh and Agrawal, Dharma P.}
  12627 }
  12628 @booklet {Boulkenafed02adhocfs:sharing,
  12629         title = {AdHocFS: Sharing Files in WLANs},
  12630         year = {2002},
  12631         abstract = {This paper presents the ADHOCFS file system for mobile users, which realizes transparent, adaptive file access according to the users' specific situations (e.g., device in use, network connectivity, etc.). The paper concentrates more specifically on the support of ADHOCFS for collaborative file sharing within ad hoc groups of trusted nodes that are in local communication range of each other using the underlying ad hoc network, which has not been addressed in the past},
  12632         www_section = {ad-hoc networks},
  12633         isbn = {0-7695-1938-5},
  12634         url = {http://portal.acm.org/citation.cfm?id=825345},
  12635         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9956.pdf},
  12636         author = {Malika Boulkenafed and Valerie Issarny}
  12637 }
  12638 @conference {BonehGolle:psp2002,
  12639         title = {Almost Entirely Correct Mixing With Application to Voting},
  12640         booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security (CCS 2002)},
  12641         year = {2002},
  12642         month = {November},
  12643         pages = {68--77},
  12644         publisher = {ACM  New York, NY, USA},
  12645         organization = {ACM  New York, NY, USA},
  12646         address = {Washington, DC},
  12647         abstract = {In order to design an exceptionally efficient mix network, both asymptotically and in real terms, we develop the notion of almost entirely correct mixing, and propose a new mix network that is almost entirely correct. In our new mix, the real cost of proving correctness is orders of magnitude faster than in all other mix nets. The trade-off is that our mix only guarantees "almost entirely correct" mixing, i.e., it guarantees that the mix network processed correctly all inputs with high (but not overwhelming) probability. We use a new technique for verifying correctness. This new technique consists of computing the product of a random subset of the inputs to a mix server, then requiring the mix server to produce a subset of the outputs of equal product. Our new mix net is of particular value for electronic voting, where a guarantee of almost entirely correct mixing may well be sufficient to announce instantly the result of a large election. The correctness of the result can later be verified beyond a doubt using any one of a number of much slower proofs of perfect-correctness, without having to mix the ballots again},
  12648         www_section = {electronic voting},
  12649         isbn = {1-58113-612-9},
  12650         doi = {10.1145/586110.586121},
  12651         url = {http://portal.acm.org/citation.cfm?id=586121},
  12652         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BonehGolle-psp2002.pdf},
  12653         author = {Dan Boneh and Philippe Golle},
  12654         editor = {Vijay Atluri}
  12655 }
  12656 @conference {664025,
  12657         title = {AMnet 2.0: An Improved Architecture for Programmable Networks},
  12658         booktitle = {IWAN '02: Proceedings of the IFIP-TC6 4th International Working Conference on Active Networks},
  12659         year = {2002},
  12660         pages = {162--176},
  12661         publisher = {Springer-Verlag},
  12662         organization = {Springer-Verlag},
  12663         address = {London, UK},
  12664         abstract = {AMnet 2.0 is an improved architecture for programmable networks that is based on the experiences from the previous implementation of AMnet. This paper gives an overview of the AMnet architecture and Linux-based implementation of this software router. It also discusses the differences to the previous version of AMnet. AMnet 2.0 complements application services with net-centric services in an integrated system that provides the fundamental building blocks both for an active node itself and the operation of a larger set of nodes, including code deployment decisions, service relocation, resource management},
  12665         www_section = {programmable networks},
  12666         isbn = {3-540-00223-5},
  12667         doi = {10.1007/3-540-36199-5},
  12668         url = {http://portal.acm.org/citation.cfm?id=664025$\#$},
  12669         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann02architecture_0.pdf},
  12670         author = {Thomas Fuhrmann and Till Harbaum and Marcus Schoeller and Martina Zitterbart}
  12671 }
  12672 @conference {RRMPH02-1,
  12673         title = {Analysis of an Anonymity Network for Web Browsing},
  12674         booktitle = {Proceedings of the IEEE 7th Intl. Workshop on Enterprise Security (WET ICE 2002)},
  12675         year = {2002},
  12676         month = {June},
  12677         pages = {49--54},
  12678         publisher = {IEEE Computer Society  Washington, DC, USA},
  12679         organization = {IEEE Computer Society  Washington, DC, USA},
  12680         address = {Pittsburgh, USA},
  12681         abstract = {Various systems offering anonymity for near real-time Internet traffic have been operational. However, they did not deliver many quantitative results about performance, bandwidth overhead, or other issues that arise when implementing or operating such a system. Consequently, the problem of designing and operating these systems in a way that they provide a good balance between usability, protection from attacks, and overhead is not well understood. In this paper, we present the analysis of an anonymity network for web browsing that offers a high level of anonymity against a sophisticated attacker and good end-to-end performance at a reasonable bandwidth overhead. We describe a novel way of operating the system that maximizes the protection from traffic analysis attacks while minimizing the bandwidth overhead. We deliver quantitative results about the performance of our system, which should help to give a better understanding of anonymity networks},
  12682         www_section = {anonymity, anonymous web browsing},
  12683         isbn = {0-7695-1748-X},
  12684         url = {http://portal.acm.org/citation.cfm?id=759973},
  12685         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RRMPH02-1.pdf},
  12686         author = {Marc Rennhard and Sandro Rafaeli and Laurent Mathy and Bernhard Plattner and David Hutchison}
  12687 }
  12688 @conference {wright02,
  12689         title = {An Analysis of the Degradation of Anonymous Protocols},
  12690         booktitle = {Proceedings of the Network and Distributed Security Symposium--NDSS '02},
  12691         year = {2002},
  12692         month = feb,
  12693         publisher = {IEEE},
  12694         organization = {IEEE},
  12695         abstract = {There have been a number of protocols proposed for anonymous network communication. In this paper we investigate attacks by corrupt group members that degrade the anonymity of each protocol over time. We prove that when a particular initiator continues communication with a particular responder across path reformations, existing protocols are subject to the attack. We use this result to place an upper bound on how long existing protocols, including Crowds, Onion Routing, Hordes, Web Mixes, and DC-Net, can maintain anonymity in the face of the attacks described. Our results show that fully-connected DC-Net is the most resilient to these attacks, but it suffers from scalability issues that keep anonymity group sizes small. Additionally, we show how violating an assumption of the attack allows malicious users to set up other participants to falsely appear to be the initiator of a connection},
  12696         www_section = {anonymity, Crowds, dining cryptographers, Hordes, onion routing},
  12697         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.9435},
  12698         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright-degrade.pdf},
  12699         author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}
  12700 }
  12701 @booklet {Serjantov02anonymizingcensorship,
  12702         title = {Anonymizing Censorship Resistant Systems},
  12703         volume = {2429},
  12704         year = {2002},
  12705         pages = {111--120 },
  12706         publisher = {Springer-Verlag  London, UK},
  12707         abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship resistant system with user, server and active-server document anonymity as well as efficient document retrieval. The retrieval service is layered on top of an existing Peer-to-Peer infrastructure, which should facilitate its implementation},
  12708         isbn = {3-540-44179-4},
  12709         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.13.5048\&rep=rep1\&type=pdf},
  12710         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.5048.pdf},
  12711         author = {Andrei Serjantov}
  12712 }
  12713 @conference {Serj02-iptps,
  12714         title = {Anonymizing censorship resistant systems},
  12715         booktitle = {Proceedings of the 1st International Peer To Peer Systems Workshop (IPTPS 2002)},
  12716         year = {2002},
  12717         month = mar,
  12718         publisher = {Springer-Verlag  London, UK},
  12719         organization = {Springer-Verlag  London, UK},
  12720         abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship resistant system with user, server and active-server document anonymity as well as efficient document retrieval. The retrieval service is layered on top of an existing Peer-to-Peer infrastructure, which should facilitate its implementation. The key idea is to separate the role of document storers from the machines visible to the users, which makes each individual part of the system less prone to attacks, and therefore to censorship.
  12721 Indeed, if one server has been pressured into removal, the other server administrators may simply follow the precedent and remove the offending content themselves},
  12722         www_section = {anonymity, censorship resistance, P2P},
  12723         isbn = {978-3-540-44179-3},
  12724         doi = {10.1007/3-540-45748-8},
  12725         url = {http://portal.acm.org/citation.cfm?id=687808},
  12726         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Serj02-iptps.pdf},
  12727         author = {Andrei Serjantov}
  12728 }
  12729 @conference {714768,
  12730         title = {Aspects of AMnet Signaling},
  12731         booktitle = {NETWORKING '02: Proceedings of the Second International IFIP-TC6 Networking Conference on Networking Technologies, Services, and Protocols; Performance of Computer and Communication Networks; and Mobile and Wireless Communications},
  12732         year = {2002},
  12733         pages = {1214--1220},
  12734         publisher = {Springer-Verlag},
  12735         organization = {Springer-Verlag},
  12736         address = {London, UK},
  12737         abstract = {AMnet provides a framework for flexible and rapid service creation. It is based on Programmable Networking technologies and uses active nodes (AMnodes) within the network for the provision of individual, application-specific services. To this end, these AMnodes execute service modules that are loadable on-demand and enhance the functionality of intermediate systems without the need of long global standardization processes.
  12738 Placing application-dedicated functionality within the network requires a flexible signaling protocol to discover and announce as well as to establish and maintain the corresponding services. AMnet Signaling was developed for this purpose and will be presented in detail within this paper},
  12739         www_section = {multicast, programmable networks},
  12740         isbn = {3-540-43709-6},
  12741         doi = {10.1007/3-540-47906-6},
  12742         url = {http://www.springerlink.com/content/4j371710765jg14q/},
  12743         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/speer02networking.pdf},
  12744         author = {Speer, Anke and Marcus Schoeller and Thomas Fuhrmann and Martina Zitterbart}
  12745 }
  12746 @conference {beimel-barrier,
  12747         title = {Breaking the $O(n^{1/(2k-1)})$ Barrier for Information-Theoretic Private Information Retrieval},
  12748         booktitle = {Proceedings of the 43rd IEEE Symposium on Foundations of Computer Science (FOCS)},
  12749         year = {2002},
  12750         abstract = {Private Information Retrieval (PIR) protocols allow a user to retrieve a data item from a database while hiding the identity of the item being retrieved. Specifically, in information-theoretic, k-server PIR protocols the database is replicated among k servers, and each server learns nothing about the item the user retrieves. The cost of such protocols is measured by the communication complexity of retrieving one out of n bits of data. For any fixed k, the complexity of the best protocols prior to our work was $O(n^{1/(2k-1)})$ (Ambainis, 1997). Since then several methods were developed in an attempt to beat this bound, but all these methods yielded the same asymptotic bound. In this work, this barrier is finally broken and the complexity of information-theoretic k-server PIR is improved to $n^{O(\log\log k/(k\log k))}$. The new PIR protocols can also be used to construct k-query binary locally decodable codes of length $\exp(n^{O(\log\log k/(k\log k))})$, compared to $\exp(n^{1/(k-1)})$ in previous constructions. The improvements presented in this paper apply even for small values of k: the PIR protocols are more efficient than previous ones for every $k \geq 3$, and the locally decodable codes are shorter for every $k \geq 4$},
  12751         www_section = {private information retrieval},
  12752         isbn = {0-7695-1822-2},
  12753         url = {http://portal.acm.org/citation.cfm?id=652187},
  12754         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/beimel-barrier.pdf},
  12755         author = {Amos Beimel and Yuval Ishai and Eyal Kushilevitz and Jean-Fran{\c c}ois Raymond}
  12756 }
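% For contrast with the sub-polynomial bounds above, the Python sketch below shows the classic two-server
% information-theoretic PIR scheme with O(n) communication (background material, not the protocol of this
% paper): each server sees only a uniformly random subset of indices, yet the XOR of the two answers yields
% the requested bit.
%
% import random
%
% def xor_bits(db, subset):
%     out = 0
%     for j in subset:
%         out ^= db[j]
%     return out
%
% n = 32
% db = [random.randint(0, 1) for _ in range(n)]          # database replicated at both servers
% i = 17                                                 # index the client wants
%
% S1 = {j for j in range(n) if random.random() < 0.5}    # uniformly random subset
% S2 = S1 ^ {i}                                          # symmetric difference with {i}
%
% a1 = xor_bits(db, S1)   # answer from server 1 (sees only S1)
% a2 = xor_bits(db, S2)   # answer from server 2 (sees only S2)
% assert a1 ^ a2 == db[i] # all bits outside {i} cancel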
  12757 @conference {Mazieres:2002:BSF:571825.571840,
  12758         title = {Building secure file systems out of Byzantine storage},
  12759         booktitle = {PODC'02--Proceedings of the 21st Annual Symposium on Principles of Distributed Computing},
  12760         series = {PODC '02},
  12761         year = {2002},
  12762         month = jul,
  12763         pages = {108--117},
  12764         publisher = {ACM},
  12765         organization = {ACM},
  12766         address = {Monterey, CA, USA},
  12767         abstract = {This paper shows how to implement a trusted network file system on an untrusted server. While cryptographic storage techniques exist that allow users to keep data secret from untrusted servers, this work concentrates on the detection of tampering attacks and stale data. Ideally, users of an untrusted storage server would immediately and unconditionally notice any misbehavior on the part of the server. This ideal is unfortunately not achievable. However, we define a notion of data integrity called fork consistency in which, if the server delays just one user from seeing even a single change by another, the two users will never again see one another's changes---a failure easily detectable with on-line communication. We give a practical protocol for a multi-user network file system called SUNDR, and prove that SUNDR offers fork consistency whether or not the server obeys the protocol},
  12768         www_section = {Byzantine storage, detection, secure file system, stale data, tampering attack, trusted network, untrusted server},
  12769         isbn = {1-58113-485-1},
  12770         doi = {http://doi.acm.org/10.1145/571825.571840},
  12771         url = {http://doi.acm.org/10.1145/571825.571840},
  12772         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PODC\%2702\%20-\%20Building\%20secure\%20file\%20systems\%20out\%20of\%20Byzantine\%20storage.pdf},
  12773         author = {David Mazi{\`e}res and Shasha, Dennis}
  12774 }
  12775 @article {Oswald02capacity-achievingsequences,
  12776         title = {Capacity-achieving sequences for the erasure channel},
  12777         journal = {IEEE Trans. Information Theory},
  12778         volume = {48},
  12779         year = {2002},
  12780         month = dec,
  12781         pages = {3017--3028},
  12782         abstract = {This paper starts a systematic study of capacity-achieving sequences of low-density parity-check codes for the erasure channel. We introduce a class A of analytic functions and develop a procedure to obtain degree distributions for the codes. We show various properties of this class which will help us construct new distributions from old ones. We then study certain types of capacity-achieving sequences and introduce new measures for their optimality. For instance, it turns out that the right-regular sequence is capacity-achieving in a much stronger sense than, e.g., the Tornado sequence. This also explains why numerical optimization techniques tend to favor graphs with only one degree of check nodes. Using our methods, we attack the problem of reducing the fraction of degree 2 variable nodes, which has important practical implications. It turns out that one can produce capacity-achieving sequences for which this fraction remains below any constant, albeit at the price of slower convergence to capacity},
  12783         www_section = {coding theory, low-density parity-check},
  12784         issn = {0018-9448},
  12785         doi = {10.1109/TIT.2002.805067},
  12786         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.83.6722},
  12787         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.92.7281.pdf},
  12788         author = {Peter Oswald and M. Amin Shokrollahi}
  12789 }
  12790 @conference {cebolla,
  12791         title = {Cebolla: Pragmatic IP Anonymity},
  12792         booktitle = {Proceedings of the 2002 Ottawa Linux Symposium},
  12793         year = {2002},
  12794         month = {June},
  12795         abstract = {Cebolla is an intersection of cryptographic mix networks and the environment of the public Internet. Most of the history of cryptographic mix networks lies in academic attempts to provide anonymity of various sorts to the users of the network. While based on strong cryptographic principles, most attempts have failed to address properties of the public network and the reasonable expectations of most of its users. Cebolla attempts to address this gulf between the interesting research aspects of IP level anonymity and the operational expectations of most uses of the IP network},
  12796         www_section = {anonymity, cryptography},
  12797         url = {http://www.linuxinsight.com/ols2002_cebolla_pragmatic_ip_anonymity.html},
  12798         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cebolla.pdf},
  12799         author = {Zach Brown}
  12800 }
  12801 @booklet {Fiat02censorshipresistant,
  12802         title = {Censorship Resistant Peer-to-Peer Content Addressable Networks},
  12803         year = {2002},
  12804         abstract = {We present a censorship resistant peer-to-peer network for accessing n data items in a network of n nodes. Each search for a data item in the network takes O(log n) time and requires at most O(log^2 n) messages. Our network is censorship resistant in the sense that even after adversarial removal of an arbitrarily large constant fraction of the nodes in the network, all but an arbitrarily small fraction of the remaining nodes can obtain all but an arbitrarily small fraction of the original data items. The network can be created in a fully distributed fashion. It requires only O(log n) memory in each node. We also give a variant of our scheme that has the property that it is highly spam resistant: an adversary can take over complete control of a constant fraction of the nodes in the network and yet will still be unable to generate spam},
  12805         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.4761\&rep=rep1\&type=pdf},
  12806         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.4761.pdf},
  12807         author = {Amos Fiat and Jared Saia}
  12808 }
  12809 @conference {chaffinch,
  12810         title = {Chaffinch: Confidentiality in the Face of Legal Threats},
  12811         booktitle = {Proceedings of Information Hiding Workshop (IH 2002)},
  12812         year = {2002},
  12813         month = {October},
  12814         publisher = {Springer-Verlag, LNCS 2578},
  12815         organization = {Springer-Verlag, LNCS 2578},
  12816         abstract = {We present the design and rationale of a practical system for passing confidential messages. The mechanism is an adaptation of Rivest's {\textquotedblleft}chaffing and winnowing{\textquotedblright}, which has the legal advantage of using authentication keys to provide privacy. We identify a weakness in Rivest's particular choice of his {\textquotedblleft}package transform{\textquotedblright} as an {\textquotedblleft}all-or-nothing{\textquotedblright} element within his scheme. We extend the basic system to allow the passing of several messages concurrently. Only some of these messages need be divulged under legal duress, the other messages will be plausibly deniable. We show how this system may have some resilience to the type of legal attack inherent in the UK's Regulation of Investigatory Powers (RIP) Act},
  12817         www_section = {legal attack, RIP},
  12818         doi = {10.1007/3-540-36415-3},
  12819         url = {http://portal.acm.org/citation.cfm?id=647598.732024},
  12820         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Chaffinch.pdf},
  12821         author = {Richard Clayton and George Danezis},
  12822         editor = {Fabien Petitcolas}
  12823 }
  12824 @conference {511496,
  12825         title = {Choosing reputable servents in a P2P network},
  12826         booktitle = {WWW '02: Proceedings of the 11th international conference on World Wide Web},
  12827         year = {2002},
  12828         pages = {376--386},
  12829         publisher = {ACM},
  12830         organization = {ACM},
  12831         address = {New York, NY, USA},
  12832         www_section = {credibility, polling protocol, reputation},
  12833         isbn = {1-58113-449-5},
  12834         doi = {10.1145/511446.511496},
  12835         url = {http://portal.acm.org/citation.cfm?id=511496$\#$},
  12836         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/samarati.pdf},
  12837         author = {Cornelli, Fabrizio and Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and Pierangela Samarati}
  12838 }
  12839 @article {571638,
  12840         title = {COCA: A secure distributed online certification authority},
  12841         journal = {ACM Trans. Comput. Syst.},
  12842         volume = {20},
  12843         number = {4},
  12844         year = {2002},
  12845         pages = {329--368},
  12846         publisher = {ACM},
  12847         address = {New York, NY, USA},
  12848         abstract = {COCA is a fault-tolerant and secure online certification authority that has been built and deployed both in a local area network and in the Internet. Extremely weak assumptions characterize environments in which COCA's protocols execute correctly: no assumption is made about execution speed and message delivery delays; channels are expected to exhibit only intermittent reliability; and with 3t + 1 COCA servers up to t may be faulty or compromised. COCA is the first system to integrate a Byzantine quorum system (used to achieve availability) with proactive recovery (used to defend against mobile adversaries which attack, compromise, and control one replica for a limited period of time before moving on to another). In addition to tackling problems associated with combining fault-tolerance and security, new proactive recovery protocols had to be developed. Experimental results give a quantitative evaluation for the cost and effectiveness of the protocols},
  12849         www_section = {byzantine fault tolerance, certification authority, denial-of-service, proactive secret-sharing, public key cryptography, threshold cryptography},
  12850         issn = {0734-2071},
  12851         doi = {10.1145/571637.571638},
  12852         url = {http://portal.acm.org/citation.cfm?id=571638$\#$},
  12853         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cocaTOCS.pdf},
  12854         author = {Zhou, Lidong and Schneider, Fred B. and Robbert Van Renesse}
  12855 }
  12856 @conference {Harren:2002:CQD:646334.687945,
  12857         title = {Complex Queries in DHT-based Peer-to-Peer Networks},
  12859         booktitle = {IPTPS'01--Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  12860         series = {IPTPS '01},
  12861         year = {2002},
  12862         month = mar,
  12863         pages = {242--259},
  12864         publisher = {Springer-Verlag},
  12865         organization = {Springer-Verlag},
  12866         address = {Cambridge, MA, USA},
  12867         abstract = {Recently a new generation of P2P systems, offering distributed hash table (DHT) functionality, have been proposed. These systems greatly improve the scalability and exact-match accuracy of P2P systems, but offer only the exact-match query facility. This paper outlines a research agenda for building complex query facilities on top of these DHT-based P2P systems. We describe the issues involved and outline our research plan and current status},
  12868         www_section = {distributed hash table},
  12869         isbn = {3-540-44179-4},
  12870         url = {http://dl.acm.org/citation.cfm?id=646334.687945},
  12871         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Complex\%20queries\%20in\%20DHT-based\%20p2p\%20networks.pdf},
  12872         author = {Harren, Matthew and Hellerstein, Joseph M. and Huebsch, Ryan and Boon Thau Loo and S Shenker and Ion Stoica}
  12873 }
  12874 @conference {Mui:2002:CMT:820745.821158,
  12875         title = {A Computational Model of Trust and Reputation},
  12876         booktitle = {HICSS'02. Proceedings of the 35th Annual Hawaii International Conference on System Sciences},
  12877         series = {HICSS '02},
  12878         year = {2002},
  12879         month = jan,
  12880         pages = {2431--2439 },
  12881         publisher = {IEEE Computer Society},
  12882         organization = {IEEE Computer Society},
  12883         address = {Big Island, Hawaii, USA},
  12884         abstract = {Despite their many advantages, e-businesses lag behind brick and mortar businesses in several fundamental respects. This paper concerns one of these: relationships based on trust and reputation. Recent studies on simple reputation systems for e-Businesses such as eBay have pointed to the importance of such rating systems for deterring moral hazard and encouraging trusting interactions. However, despite numerous studies on trust and reputation systems, few have taken studies across disciplines to provide an integrated account of these concepts and their relationships. This paper first surveys existing literatures on trust, reputation and a related concept: reciprocity. Based on sociological and biological understandings of these concepts, a computational model is proposed. This model can be implemented in a real system to consistently calculate agents' trust and reputation scores},
  12885         www_section = {e-business, moral hazard, reciprocity, reputation, trust},
  12886         isbn = {0-7695-1435-9},
  12887         doi = {http://dx.doi.org/10.1109/HICSS.2002.994181},
  12888         url = {http://dl.acm.org/citation.cfm?id=820745.821158},
  12889         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HICSS\%2702\%20-\%20A\%20computational\%20model\%20of\%20trust\%20and\%20reputation.pdf},
  12890         author = {Lik Mui and Mojdeh Mohtashemi and Ari Halberstadt}
  12891 }
  12892 @conference {2002_0,
  12893         title = {Cooperative Backup System},
  12894         booktitle = {In The USENIX Conf. on File and Storage Technologies},
  12895         year = {2002},
  12896         abstract = {This paper presents the design of a novel backup system built on top of a peer-to-peer architecture with minimal supporting infrastructure. The system can be deployed for both large-scale and small-scale peer-to-peer overlay networks. It allows computers connected to the Internet to back up their data cooperatively. Each computer has a set of partner computers and stores its backup data distributively among those partners. In return, each computer stores data for its partners, in such a way as to achieve both fault-tolerance and high reliability. This form of cooperation poses several interesting technical challenges because these computers have independent failure modes, do not trust each other, and are subject to third party attacks},
  12897         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/elnikety.pdf},
  12898         author = {Sameh Elnikety and Mark Lillibridge and Mike Burrows and Willy Zwaenepoel}
  12899 }
  12900 @conference {715916,
  12901         title = {CPCMS: A Configuration Management System Based on Cryptographic Names},
  12902         booktitle = {Proceedings of the FREENIX Track: 2002 USENIX Annual Technical Conference},
  12903         year = {2002},
  12904         pages = {207--220},
  12905         publisher = {USENIX Association},
  12906         organization = {USENIX Association},
  12907         address = {Berkeley, CA, USA},
  12908         abstract = {CPCMS, the Cryptographically Protected Configuration Management System is a new configuration management system that provides scalability, disconnected commits, and fine-grain access controls. It addresses the novel problems raised by modern open-source development practices, in which projects routinely span traditional organizational boundaries and can involve thousands of participants. CPCMS provides for simultaneous public and private lines of development, with post hoc "publication" of private branches},
  12909         isbn = {1-880446-01-4},
  12910         url = {http://portal.acm.org/citation.cfm?id=715916$\#$},
  12911         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.3184.pdf},
  12912         author = {Shapiro, Jonathan S. and Vanderburgh, John}
  12913 }
  12914 @conference {idemix,
  12915         title = {Design and implementation of the idemix anonymous credential system},
  12916         booktitle = {Proceedings of the 9th ACM conference on Computer and communications security (CCS 2002)},
  12917         year = {2002},
  12918         pages = {21--30},
  12919         publisher = {ACM Press},
  12920         organization = {ACM Press},
  12921         address = {New York, NY, USA},
  12922         abstract = {Anonymous credential systems [8, 9, 12, 24] allow anonymous yet authenticated and accountable transactions between users and service providers. As such, they represent a powerful technique for protecting users' privacy when conducting Internet transactions. In this paper, we describe the design and implementation of an anonymous credential system based on the protocols developed by [6]. The system is based on new high-level primitives and interfaces allowing for easy integration into access control systems. The prototype was realized in Java. We demonstrate its use and some deployment issues with the description of an operational demonstration scenario},
  12923         www_section = {anonymity, anonymous credential system},
  12924         isbn = {1-58113-612-9},
  12925         doi = {http://doi.acm.org/10.1145/586110.586114},
  12926         url = {http://portal.acm.org/citation.cfm?id=586114},
  12927         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/idemix.pdf},
  12928         author = {Jan Camenisch and Els Van Herreweghen}
  12929 }
  12930 @conference {713855,
  12931         title = {Design Evolution of the EROS Single-Level Store},
  12932         booktitle = {ATEC '02: Proceedings of the General Track of the annual conference on USENIX Annual Technical Conference},
  12933         year = {2002},
  12934         pages = {59--72},
  12935         publisher = {USENIX Association},
  12936         organization = {USENIX Association},
  12937         address = {Berkeley, CA, USA},
  12938         abstract = {File systems have (at least) two undesirable characteristics: both the addressing model and the consistency semantics differ from those of memory, leading to a change in programming model at the storage boundary. Main memory is a single flat space of pages with a simple durability (persistence) model: all or nothing. File content durability is a complex function of implementation, caching, and timing. Memory is globally consistent. File systems offer no global consistency model. Following a crash recovery, individual files may be lost or damaged, or may be collectively inconsistent even though they are individually sound},
  12939         www_section = {file systems},
  12940         isbn = {1-880446-00-6},
  12941         url = {http://portal.acm.org/citation.cfm?id=713855$\#$},
  12942         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/storedesign2002.pdf},
  12943         author = {Shapiro, Jonathan S. and Adams, Jonathan}
  12944 }
  12945 @article {Rubenstein:2000:DSC:345063.339410,
  12946         title = {Detecting shared congestion of flows via end-to-end measurement},
  12947         journal = {IEEE/ACM Transactions on Networking},
  12948         volume = {10},
  12949         year = {2002},
  12950         month = jun,
  12951         pages = {381--395},
  12952         publisher = {ACM},
  12953         address = {New York, NY, USA},
  12954         abstract = {Current Internet congestion control protocols operate independently on a per-flow basis. Recent work has demonstrated that cooperative congestion control strategies between flows can improve performance for a variety of applications, ranging from aggregated TCP transmissions to multiple-sender multicast applications. However, in order for this cooperation to be effective, one must first identify the flows that are congested at the same set of resources. We present techniques based on loss or delay observations at end hosts to infer whether or not two flows experiencing congestion are congested at the same network resources. Our novel result is that such detection can be achieved for unicast flows, but the techniques can also be applied to multicast flows. We validate these techniques via queueing analysis, simulation and experimentation within the Internet. In addition, we demonstrate preliminary simulation results that show that the delay-based technique can determine whether two TCP flows are congested at the same set of resources. We also propose metrics that can be used as a measure of the amount of congestion sharing between two flows},
  12955         www_section = {end-to-end measurement, flow, internet congestion protocols, per-flow, shared congestion},
  12956         issn = {1063-6692},
  12957         doi = {http://dx.doi.org/10.1109/TNET.2002.1012369},
  12958         url = {http://dx.doi.org/10.1109/TNET.2002.1012369},
  12959         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Transactions\%20on\%20Networking\%20-\%20Detecting\%20shared\%20congestion\%20of\%20flows.pdf},
  12960         author = {Rubenstein, Dan and Kurose, Jim and Don Towsley}
  12961 }
  12962 @conference {Feigenbaum:2002:DAM:570810.570812,
  12963         title = {Distributed algorithmic mechanism design: recent results and future directions},
  12964         booktitle = {DIALM'02. Proceedings of the 6th international workshop on Discrete algorithms and methods for mobile computing and communications},
  12965         series = {DIALM '02},
  12966         year = {2002},
  12967         month = sep,
  12968         pages = {1--13},
  12969         publisher = {ACM},
  12970         organization = {ACM},
  12971         address = {Atlanta, Georgia},
  12972         abstract = {Distributed Algorithmic Mechanism Design (DAMD) combines theoretical computer science's traditional focus on computational tractability with its more recent interest in incentive compatibility and distributed computing. The Internet's decentralized nature, in which distributed computation and autonomous agents prevail, makes DAMD a very natural approach for many Internet problems. This paper first outlines the basics of DAMD and then reviews previous DAMD results on multicast cost sharing and interdomain routing. The remainder of the paper describes several promising research directions and poses some specific open problems},
  12973         www_section = {algorithmic mechanism design, algorithms, distributed computation, multicast, routing},
  12974         isbn = {1-58113-587-4},
  12975         doi = {http://doi.acm.org/10.1145/570810.570812},
  12976         url = {http://jmvidal.cse.sc.edu/library/feigenbaum02a.pdf},
  12977         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DIALM\%2702\%20-\%20Feigenbaum\%20\%26\%20Shenker\%20-\%20Distributed\%20algorithmic\%20mechanism\%20design.pdf},
  12978         author = {Feigenbaum, Joan and S Shenker}
  12979 }
  12980 @booklet {Hildrum:CSD-02-1178,
  12981         title = {Distributed Data Location in a Dynamic Network},
  12982         number = {UCB/CSD-02-1178},
  12983         year = {2002},
  12984         month = apr,
  12985         publisher = {EECS Department, University of California, Berkeley},
  12986         abstract = {Modern networking applications replicate data and services widely, leading to a need for location-independent routing -- the ability to route queries directly to objects using names that are independent of the objects' physical locations. Two important properties of a routing infrastructure are routing locality and rapid adaptation to arriving and departing nodes. We show how these two properties can be achieved with an efficient solution to the nearest-neighbor problem. We present a new distributed algorithm that can solve the nearest-neighbor problem for a restricted metric space. We describe our solution in the context of Tapestry, an overlay network infrastructure that employs techniques proposed by Plaxton, Rajaraman, and Richa},
  12987         url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2002/5214.html},
  12988         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-02-1178.pdf},
  12989         author = {Hildrum, Kirsten and John Kubiatowicz and Rao, Satish and Ben Y. Zhao}
  12990 }
  12991 @article {wagner,
  12992         title = {Don't Shoot the Messenger: Limiting the Liability of Anonymous Remailers},
  12993         journal = {New Mexico Law Review},
  12994         volume = {32},
  12995         number = {Winter},
  12996         year = {2002},
  12997         pages = {99--142},
  12998         abstract = {I will close the remailer for the time being because the legal issues concerning the Internet in Finland are yet undefined. The legal protection of the users needs to be clarified. At the moment the privacy of Internet messages is judicially unclear. I have also personally been a target because of the remailer. Unjustified accusations affect both my job and my private life},
  12999         www_section = {privacy},
  13000         url = {https://litigation-essentials.lexisnexis.com/webcd/app?action=DocumentDisplay\&crawlid=1\&doctype=cite\&docid=32+N.M.L.+Rev.+99\&srctype=smi\&srcid=3B15\&key=008c465fa13eb62c9370e4baa5eea0e5},
  13001         author = {Robyn Wagner}
  13002 }
  13003 @conference {langos02,
  13004         title = {Dummy Traffic Against Long Term Intersection Attacks},
  13005         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)},
  13006         year = {2002},
  13007         month = {April},
  13008         publisher = {Springer-Verlag, LNCS 2482},
  13009         organization = {Springer-Verlag, LNCS 2482},
  13010         abstract = {In this paper we propose a method to prevent so called {\textquotedblleft}intersection attacks{\textquotedblright} on anonymity services. Intersection attacks are possible if not all users of such a service are active all the time and part of the transfered messages are linkable. Especially in real systems, the group of users (anonymity set) will change over time due to online and off-line periods.
  13011 Our proposed solution is to send pregenerated dummy messages to the communication partner (e.g. the web server), during the user's off-line periods.
  13012 For a detailed description of our method we assume a cascade of Chaumian MIXes as anonymity service and respect and fulfill the MIX attacker model},
  13013         www_section = {anonymity service, intersection attacks},
  13014         isbn = {978-3-540-00565-0},
  13015         doi = {10.1007/3-540-36467-6},
  13016         url = {http://www.springerlink.com/content/66ybualwu5hmh563/},
  13017         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/langos02.pdf},
  13018         author = {Oliver Berthold and Heinrich Langos},
  13019         editor = {Roger Dingledine and Paul Syverson}
  13020 }
  13021 @conference {camenisch2002da,
  13022         title = {Dynamic Accumulators and Application to Efficient Revocation of Anonymous Credentials},
  13023         booktitle = {Proceedings of CRYPTO 2002},
  13024         year = {2002},
  13025         pages = {61--76},
  13026         publisher = {Springer Verlag, LNCS 2442},
  13027         organization = {Springer Verlag, LNCS 2442},
  13028         abstract = {We introduce the notion of a dynamic accumulator. An accumulator scheme allows one to hash a large set of inputs into one short value, such that there is a short proof that a given input was incorporated into this value. A dynamic accumulator allows one to dynamically add and delete a value, such that the cost of an add or delete is independent of the number of accumulated values. We provide a construction of a dynamic accumulator and an efficient zero-knowledge proof of knowledge of an accumulated value. We prove their security under the strong RSA assumption. We then show that our construction of dynamic accumulators enables efficient revocation of anonymous credentials, and membership revocation for recent group signature and identity escrow schemes},
  13029         www_section = {anonymity, certificate revocation, credentials, dynamic accumulators, group signatures, identity escrow},
  13030         isbn = {978-3-540-44050-5},
  13031         doi = {10.1007/3-540-45708-9},
  13032         url = {http://portal.acm.org/citation.cfm?id=704437},
  13033         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2002da.pdf},
  13034         author = {Jan Camenisch and Anna Lysyanskaya}
  13035 }
  13036 @booklet {Saia02dynamicallyfault-tolerant,
  13037         title = {Dynamically Fault-Tolerant Content Addressable Networks},
  13038         year = {2002},
  13039         abstract = {We describe a content addressable network which is robust in the face of massive adversarial attacks and in a highly dynamic environment. Our network is robust in the sense that at any time, an arbitrarily large fraction of the peers can reach an arbitrarily large fraction of the data items. The network can be created and maintained in a completely distributed fashion},
  13040         www_section = {fault-tolerance, robustness},
  13041         doi = {10.1007/3-540-45748-8},
  13042         url = {http://www.springerlink.com/content/r7fumjuwmgnd4md1/},
  13043         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/180.pdf},
  13044         author = {Jared Saia and Amos Fiat and Steven D. Gribble and Anna R. Karlin and Stefan Saroiu}
  13045 }
  13046 @conference {esed,
  13047         title = {Efficient Sharing of Encrypted Data},
  13048         booktitle = {Proceedings of ACISP 2002},
  13049         year = {2002},
  13050         pages = {107--120},
  13051         publisher = {Springer-Verlag},
  13052         organization = {Springer-Verlag},
  13053         address = {Melbourne, Australia},
  13054         www_section = {censorship resistance, ECRS, encoding, file-sharing, GNUnet},
  13055         url = {http://grothoff.org/christian/esed.pdf},
  13056         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/esed.pdf},
  13057         author = {Krista Bennett and Christian Grothoff and Tzvetan Horozov and Ioana Patrascu}
  13058 }
  13059 @article {605408,
  13060         title = {Energy-efficient computing for wildlife tracking: design tradeoffs and early experiences with ZebraNet},
  13061         journal = {SIGARCH Comput. Archit. News},
  13062         volume = {30},
  13063         number = {5},
  13064         year = {2002},
  13065         pages = {96--107},
  13066         publisher = {ACM},
  13067         address = {New York, NY, USA},
  13068         abstract = {Over the past decade, mobile computing and wireless communication have become increasingly important drivers of many new computing applications. The field of wireless sensor networks particularly focuses on applications involving autonomous use of compute, sensing, and wireless communication devices for both scientific and commercial purposes. This paper examines the research decisions and design tradeoffs that arise when applying wireless peer-to-peer networking techniques in a mobile sensor network designed to support wildlife tracking for biology research.The ZebraNet system includes custom tracking collars (nodes) carried by animals under study across a large, wild area; the collars operate as a peer-to-peer network to deliver logged data back to researchers. The collars include global positioning system (GPS), Flash memory, wireless transceivers, and a small CPU; essentially each node is a small, wireless computing device. Since there is no cellular service or broadcast communication covering the region where animals are studied, ad hoc, peer-to-peer routing is needed. Although numerous ad hoc protocols exist, additional challenges arise because the researchers themselves are mobile and thus there is no fixed base station towards which to aim data. Overall, our goal is to use the least energy, storage, and other resources necessary to maintain a reliable system with a very high {\textquoteleft}data homing' success rate. We plan to deploy a 30-node ZebraNet system at the Mpala Research Centre in central Kenya. More broadly, we believe that the domain-centric protocols and energy tradeoffs presented here for ZebraNet will have general applicability in other wireless and sensor applications},
  13069         issn = {0163-5964},
  13070         doi = {10.1145/635506.605408},
  13071         url = {http://portal.acm.org/citation.cfm?id=635506.605408$\#$},
  13072         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/asplos-x_annot.pdf},
  13073         author = {Juang, Philo and Oki, Hidekazu and Wang, Yong and Martonosi, Margaret and Peh, Li Shiuan and Rubenstein, Daniel}
  13074 }
  13075 @conference {687814,
  13076         title = {Erasure Coding Vs. Replication: A Quantitative Comparison},
  13077         booktitle = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13078         year = {2002},
  13079         pages = {328--338},
  13080         publisher = {Springer-Verlag},
  13081         organization = {Springer-Verlag},
  13082         address = {London, UK},
  13083         abstract = {Peer-to-peer systems are positioned to take advantage of gains in network bandwidth, storage capacity, and computational resources to provide long-term durable storage infrastructures. In this paper, we quantitatively compare building a distributed storage infrastructure that is self-repairing and resilient to faults using either a replicated system or an erasure-resilient system. We show that systems employing erasure codes have mean time to failures many orders of magnitude higher than replicated systems with similar storage and bandwidth requirements. More importantly, erasure-resilient systems use an order of magnitude less bandwidth and storage to provide similar system durability as replicated systems},
  13084         www_section = {distributed storage, erasure coding, P2P},
  13085         isbn = {3-540-44179-4},
  13086         doi = {10.1007/3-540-45748-8},
  13087         url = {http://www.springerlink.com/content/e1kmcf729e6updgm/},
  13088         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Erasure\%20coding\%20vs.\%20replication.pdf},
  13089         author = {Weatherspoon, Hakim and John Kubiatowicz}
  13090 }
  13091 @booklet {citeulike:1360149,
  13092         title = {Experiences Deploying a Large-Scale Emergent Network},
  13093         year = {2002},
  13094         abstract = {Mojo Nation was a network for robust, decentralized file storage and transfer},
  13095         isbn = {3-540-44179-4},
  13096         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.9607},
  13097         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Experiences_Deploying_a_Large-Scale_Emergent_Network.pdf},
  13098         author = {O'Hearn, Bryce W.}
  13099 }
  13100 @conference {Castro02exploitingnetwork_0,
  13101         title = {Exploiting network proximity in distributed hash tables},
  13102         booktitle = {in International Workshop on Future Directions in Distributed Computing (FuDiCo)},
  13103         year = {2002},
  13104         pages = {52--55},
  13105         abstract = {Self-organizing peer-to-peer (p2p) overlay networks like CAN, Chord, Pastry and Tapestry (also called distributed hash tables or DHTs) offer a novel platform for a variety of scalable and decentralized distributed applications. These systems provide efficient and fault-tolerant routing, object location, and load balancing within a self-organizing overlay network. One important aspect of these systems is how they exploit network proximity in the underlying Internet. Three basic approaches have been proposed to exploit network proximity in DHTs, geographic layout, proximity routing and proximity neighbour selection. In this position paper, we briefly discuss the three approaches, contrast their strengths and shortcomings, and consider their applicability
  13106 in the different DHT routing protocols. We conclude that proximity neighbor selection, when used in DHTs with prefix-based routing like Pastry and Tapestry, is highly effective and appears to dominate the other approaches},
  13107         www_section = {CAN, distributed hash table, P2P},
  13108         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.126.3062},
  13109         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fudico.pdf},
  13110         author = {Miguel Castro and Peter Druschel and Y. Charlie Hu}
  13111 }
  13112 @booklet {Castro02exploitingnetwork,
  13113         title = {Exploiting network proximity in peer-to-peer overlay networks},
  13114         year = {2002},
  13115         abstract = {The authors give an overview of various ways to use proximity information to optimize routing in peer-to-peer networks. Their study focuses on Pastry and describes in detail the protocols that are used in Pastry to build routing tables with neighbours that are close in terms of the underlying network. They give some analytical and extensive experimental evidence that the protocols are effective in reducing the length of the routing-path in terms of the link-to-link latency that their implementation uses to measure distance},
  13116         url = {http://www.research.microsoft.com/~antr/PAST/location.ps},
  13117         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/location.pdf},
  13118         author = {Miguel Castro and Peter Druschel and Y. Charlie Hu and Antony Rowstron}
  13119 }
  13120 @article {Adya:2002:FFA:844128.844130,
  13121         title = {FARSITE: Federated, Available, and Reliable Storage for an Incompletely Trusted Environment},
  13122         journal = {ACM SIGOPS Operating Systems Review},
  13123         volume = {36},
  13124         year = {2002},
  13125         month = dec,
  13126         pages = {1--14},
  13127         publisher = {ACM},
  13128         address = {New York, NY, USA},
  13129         abstract = {Farsite is a secure, scalable file system that logically functions as a centralized file server but is physically distributed among a set of untrusted computers. Farsite provides file availability and reliability through randomized replicated storage; it ensures the secrecy of file contents with cryptographic techniques; it maintains the integrity of file and directory data with a Byzantine-fault-tolerant protocol; it is designed to be scalable by using a distributed hint mechanism and delegation certificates for pathname translations; and it achieves good performance by locally caching file data, lazily propagating file updates, and varying the duration and granularity of content leases. We report on the design of Farsite and the lessons we have learned by implementing much of that design},
  13130         www_section = {centralized file server, farsite, file system, randomized replicated storage},
  13131         issn = {0163-5980},
  13132         doi = {http://doi.acm.org/10.1145/844128.844130},
  13133         url = {http://doi.acm.org/10.1145/844128.844130},
  13134         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOPS\%20-\%20FARSITE.pdf},
  13135         author = {Adya, Atul and Bolosky, William J. and Miguel Castro and Cermak, Gerald and Chaiken, Ronnie and John R. Douceur and Howell, Jon and Lorch, Jacob R. and Marvin Theimer and Roger Wattenhofer}
  13136 }
  13137 @conference {Fu:2002:FSD:505452.505453,
  13138         title = {Fast and secure distributed read-only file system},
  13139         booktitle = {OSDI 2000--Proceedings of the 4th USENIX Symposium on Operating Systems Design and Implementation},
  13140         volume = {20},
  13141         year = {2002},
  13142         month = oct,
  13143         pages = {1--24},
  13144         publisher = {ACM},
  13145         organization = {ACM},
  13146         address = {San Diego, CA, USA},
  13147         abstract = {Internet users increasingly rely on publicly available data for everything from software installation to investment decisions. Unfortunately, the vast majority of public content on the Internet comes with no integrity or authenticity guarantees. This paper presents the self-certifying read-only file system, a content distribution system providing secure, scalable access to public, read-only data.
  13148 
  13149 The read-only file system makes the security of published content independent from that of the distribution infrastructure. In a secure area (perhaps off-line), a publisher creates a digitally-signed database out of a file system's contents. The publisher then replicates the database on untrusted content-distribution servers, allowing for high availability. The read-only file system protocol furthermore pushes the cryptographic cost of content verification entirely onto clients, allowing servers to scale to a large number of clients. Measurements of an implementation show that an individual server running on a 550 MHz Pentium III with FreeBSD can support 1,012 connections per second and 300 concurrent clients compiling a large software package},
  13150         www_section = {file systems, read-only, security},
  13151         issn = {0734-2071},
  13152         doi = {http://doi.acm.org/10.1145/505452.505453},
  13153         url = {http://doi.acm.org/10.1145/505452.505453},
  13154         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Fast\%20and\%20Secure\%20Distributed\%20Read-Only\%20File\%20System.pdf},
  13155         author = {Kevin Fu and Frans M. Kaashoek and David Mazi{\`e}res}
  13156 }
  13157 @conference {hintz02,
  13158         title = {Fingerprinting Websites Using Traffic Analysis},
  13159         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)},
  13160         year = {2002},
  13161         month = {April},
  13162         publisher = {Springer-Verlag, LNCS 2482},
  13163         organization = {Springer-Verlag, LNCS 2482},
  13164         abstract = {I present a traffic analysis based vulnerability in SafeWeb, an encrypting web proxy. This vulnerability allows someone monitoring the traffic of a SafeWeb user to determine if the user is visiting certain websites. I also describe a successful implementation of the attack. Finally, I discuss methods for improving the attack and for defending against the attack},
  13165         www_section = {traffic analysis, vulnerability},
  13166         isbn = {978-3-540-00565-0},
  13167         doi = {10.1007/3-540-36467-6},
  13168         url = {http://www.springerlink.com/content/c4qwe6d608p2cjyv/},
  13169         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hintz02.pdf},
  13170         author = {Andrew Hintz},
  13171         editor = {Roger Dingledine and Paul Syverson}
  13172 }
  13173 @conference {2002_1,
  13174         title = {Finite-length analysis of low-density parity-check codes on the binary erasure channel},
  13175         booktitle = {Finite-length analysis of low-density parity-check codes on the binary erasure channel},
  13176         year = {2002},
  13177         month = jan,
  13178         abstract = {In this paper, we are concerned with the finite-length analysis of low-density parity-check (LDPC) codes when used over the binary erasure channel (BEC). The main result is an expression for the exact average bit and block erasure probability for a given regular ensemble of LDPC codes when decoded iteratively. We also give expressions for upper bounds on the average bit and block erasure probability for regular LDPC ensembles and the standard random ensemble under maximum-likelihood (ML) decoding. Finally, we present what we consider to be the most important open problems in this area},
  13179         www_section = {BEC, coding theory, low-density parity-check, maximum-likelihood},
  13180         doi = {10.1109/TIT.2002.1003839},
  13181         url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1003839},
  13182         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Finite-length\%20analysis\%20of\%20low-density\%20parity-check\%20codes\%20on.pdf},
  13183         author = {Changyan Di and David Proietti and I. Emre Telatar and Thomas J. Richardson and R{\"u}diger L. Urbanke}
  13184 }
  13185 @conference {Dan:SFMix03,
  13186         title = {Forward Secure Mixes},
  13187         booktitle = {Proceedings of 7th Nordic Workshop on Secure IT Systems},
  13188         year = {2002},
  13189         month = {November},
  13190         pages = {195--207},
  13191         address = {Karlstad, Sweden},
  13192         abstract = {New threats such as compulsion to reveal logs, secret and private keys as well as to decrypt material are studied in the context of the security of mix networks. After a comparison of this new threat model with the traditional one, a new construction is introduced, the fs-mix, that minimizes the impact that such powers have on the security of the network, by using forward secure communication channels and key updating operation inside the mixes. A discussion about the forward security of these new proposals and some extensions is included},
  13193         www_section = {anonymity, forward security, mix, traffic analysis},
  13194         url = {http://citeseer.ist.psu.edu/533725.html},
  13195         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dan-SFMix03.pdf},
  13196         author = {George Danezis},
  13197         editor = {Fischer-H{\"u}bner, Jonsson}
  13198 }
  13199 @conference {trickle02,
  13200         title = {From a Trickle to a Flood: Active Attacks on Several Mix Types},
  13201         booktitle = {Proceedings of Information Hiding Workshop (IH 2002)},
  13202         year = {2002},
  13203         month = {October},
  13204         publisher = {Springer-Verlag, LNCS 2578},
  13205         organization = {Springer-Verlag, LNCS 2578},
  13206         abstract = {The literature contains a variety of different mixes, some of which have been used in deployed anonymity systems. We explore their anonymity and message delay properties, and show how to mount active attacks against them by altering the traffic between the mixes. We show that if certain mixes are used, such attacks cannot destroy the anonymity of a particular message completely. We work out the cost of these attacks in terms of the number of messages the attacker must insert into the network and the time he must spend. We discuss advantages and disadvantages of these mixes and the settings in which their use is appropriate. Finally, we look at dummy traffic and SG mixes as other promising ways of protecting against the attacks, point out potential weaknesses in existing designs, and suggest improvements},
  13207         www_section = {anonymity, attack},
  13208         isbn = {978-3-540-00421-9},
  13209         doi = {10.1007/3-540-36415-3},
  13210         url = {http://www.springerlink.com/content/um0kf3dp88b0eg5v/},
  13211         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/trickle02.pdf},
  13212         author = {Andrei Serjantov and Roger Dingledine and Paul Syverson},
  13213         editor = {Fabien Petitcolas}
  13214 }
  13215 @article {2002_2_GNet,
  13216         title = {The GNet Whitepaper},
  13217         abstract = {This paper describes GNet, a reliable anonymous distributed backup system with reasonable defenses against malicious hosts and low overhead in traffic and CPU time. The system design is described and compared to other publicly used services with similar goals. Additionally, the implementation and the protocols of GNet are presented},
  13218         year = {2002},
  13219         month = jun,
  13220         institution = {Purdue University},
  13221         type = {Technical report},
  13222         www_section = {anonymity, economics, encoding, GNUnet, obsolete database},
  13223         keywords = {anonymity, economics, encoding, GNUnet, obsolete database},
  13224         www_tags = selected,
  13225         journal = {unknown},
  13226         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main.pdf},
  13227         author = {Krista Bennett and Tiberius Stef and Christian Grothoff and Tzvetan Horozov and Ioana Patrascu}
  13228 }
  13229 @article {Levine:2002,
  13230         title = {Hordes --- A Multicast Based Protocol for Anonymity},
  13231         journal = {Journal of Computer Security},
  13232         volume = {10},
  13233         number = {3},
  13234         year = {2002},
  13235         pages = {213--240},
  13236         abstract = {With widespread acceptance of the Internet as a public medium for communication and information retrieval, there has been rising concern that the personal privacy of users can be eroded by cooperating network entities. A technical solution to maintaining privacy is to provide anonymity. We present a protocol for initiator anonymity called Hordes, which uses forwarding mechanisms similar to those used in previous protocols for sending data, but is the first protocol to make use of multicast routing to anonymously receive data. We show this results in shorter transmission latencies and requires less work of the protocol participants, in terms of the messages processed. We also present a comparison of the security and anonymity of Hordes with previous protocols, using the first quantitative definition of anonymity and unlinkability},
  13237         www_section = {anonymity, Hordes, multicast, routing},
  13238         issn = {0926-227X},
  13239         url = {http://portal.acm.org/citation.cfm?id=603406},
  13240         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Levine-2002.pdf},
  13241         author = {Brian Neil Levine and Clay Shields}
  13242 }
  13243 @conference {DBLP:conf/eurocrypt/RussellW02,
  13244         title = {How to Fool an Unbounded Adversary with a Short Key},
  13245         booktitle = {Proceedings of EUROCRYPT 2002},
  13246         year = {2002},
  13247         pages = {133--148},
  13248         author = {Alexander Russell and Hong Wang}
  13249 }
  13250 @conference {873217,
  13251         title = {Improving Data Availability through Dynamic Model-Driven Replication in Large Peer-to-Peer Communities},
  13252         booktitle = {CCGRID '02: Proceedings of the 2nd IEEE/ACM International Symposium on Cluster Computing and the Grid},
  13253         year = {2002},
  13254         pages = {0--376},
  13255         publisher = {IEEE Computer Society},
  13256         organization = {IEEE Computer Society},
  13257         address = {Washington, DC, USA},
  13258         abstract = {Efficient data sharing in global peer-to-peer systems is complicated by erratic node failure, unreliable network connectivity and limited bandwidth. Replicating data on multiple nodes can improve availability and response time. Yet determining when and where to replicate data in order to meet performance goals in large-scale systems with many users and files, dynamic network characteristics, and changing user behavior is difficult. We propose an approach in which peers create replicas automatically in a decentralized fashion, as required to meet availability goals. The aim of our framework is to maintain a threshold level of availability at all times. We identify a set of factors that hinder data availability and propose a model that decides when more replication is necessary. We evaluate the accuracy and performance of the proposed model using simulations. Our preliminary results show that the model is effective in predicting the required number of replicas in the system},
  13259         www_section = {data sharing, model-driven, P2P},
  13260         isbn = {0-7695-1582-7},
  13261         url = {http://portal.acm.org/citation.cfm?id=873217$\#$},
  13262         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.909.pdf},
  13263         author = {Ranganathan, Kavitha and Iamnitchi, Adriana and Foster, Ian}
  13264 }
  13276 @conference {infranet,
  13277         title = {Infranet: Circumventing Web Censorship and Surveillance},
  13278         booktitle = {Proceedings of the 11th USENIX Security Symposium},
  13279         year = {2002},
  13280         month = {August},
  13281         publisher = {USENIX Association, Berkeley, CA, USA},
  13282         organization = {USENIX Association, Berkeley, CA, USA},
  13283         abstract = {An increasing number of countries and companies routinely block or monitor access to parts of the Internet. To counteract these measures, we propose Infranet, a system that enables clients to surreptitiously retrieve sensitive content via cooperating Web servers distributed across the global Internet. These Infranet servers provide clients access to censored sites while continuing to host normal uncensored content. Infranet uses a tunnel protocol that provides a covert communication channel between its clients and servers, modulated over standard HTTP transactions that resemble innocuous Web browsing. In the upstream direction, Infranet clients send covert messages to Infranet servers by associating meaning to the sequence of HTTP requests being made. In the downstream direction, Infranet servers return content by hiding censored data in uncensored images using steganographic techniques. We describe the design, a prototype implementation, security properties, and performance of Infranet. Our security analysis shows that Infranet can successfully circumvent several sophisticated censoring techniques},
  13284         www_section = {censorship resistance, infranet},
  13285         isbn = {1-931971-00-5},
  13286         url = {http://portal.acm.org/citation.cfm?id=720281},
  13287         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infranet.pdf},
  13288         author = {Nick Feamster and Magdalena Balazinska and Greg Harfst and Hari Balakrishnan and David Karger}
  13289 }
  13290 @article {Cao:2002:IPG:508325.508330,
  13291         title = {Internet pricing with a game theoretical approach: concepts and examples},
  13292         journal = {IEEE/ACM Trans. Netw},
  13293         volume = {10},
  13294         year = {2002},
  13295         month = apr,
  13296         pages = {208--216},
  13297         publisher = {IEEE Press},
  13298         address = {Piscataway, NJ, USA},
  13299         abstract = {The basic concepts of three branches of game theory, leader-follower, cooperative, and two-person nonzero sum games, are reviewed and applied to the study of the Internet pricing issue. In particular, we emphasize that the cooperative game (also called the bargaining problem) provides an overall picture for the issue. With a simple model for Internet quality of service (QoS), we demonstrate that the leader-follower game may lead to a solution that is not Pareto optimal and in some cases may be "unfair," and that the cooperative game may provide a better solution for both the Internet service provider (ISP) and the user. The practical implication of the results is that government regulation or arbitration may be helpful. The QoS model is also applied to study the competition between two ISPs, and we find a Nash equilibrium point from which the two ISPs would not move out without cooperation. The proposed approaches can be applied to other Internet pricing problems such as the Paris Metro pricing scheme},
  13300         www_section = {bargaining problems, cooperative games, leader-follower games, Paris metro pricing, quality of services, two-person nonzero sum games},
  13301         issn = {1063-6692},
  13302         url = {http://dl.acm.org/citation.cfm?id=508325.508330},
  13303         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Trans.\%20Netw.\%2702\%20\%2810\%29-\%20Internet\%20pricing.pdf},
  13304         author = {Cao, Xi-Ren and Shen, Hong-Xia and Milito, Rodolfo and Wirth, Patricia}
  13305 }
  13306 @conference {stepping-stones,
  13307         title = {Inter-Packet Delay Based Correlation for Tracing Encrypted Connections through Stepping Stones},
  13308         booktitle = {Proceedings of ESORICS 2002},
  13309         year = {2002},
  13310         month = {October},
  13311         pages = {244--263},
  13312         publisher = {Springer Berlin / Heidelberg},
  13313         organization = {Springer Berlin / Heidelberg},
  13314         abstract = {Network based intrusions have become a serious threat to the users of the Internet. Intruders who wish to attack computers attached to the Internet frequently conceal their identity by staging their attacks through intermediate {\textquotedblleft}stepping stones{\textquotedblright}. This makes tracing the source of the attack substantially more difficult, particularly if the attack traffic is encrypted. In this paper, we address the problem of tracing encrypted connections through stepping stones. The incoming and outgoing connections through a stepping stone must be correlated to accomplish this. We propose a novel correlation scheme based on inter-packet timing characteristics of both encrypted and unencrypted connections. We show that (after some filtering) inter-packet delays (IPDs) of both encrypted and unencrypted, interactive connections are preserved across many router hops and stepping stones. The effectiveness of this method for correlation purposes also requires that timing characteristics be distinctive enough to identify connections. We have found that normal interactive connections such as telnet, SSH and rlogin are almost always distinctive enough to provide correct correlation across stepping stones. The number of packets needed to correctly correlate two connections is also an important metric, and is shown to be quite modest for this method},
  13315         www_section = {inter-packet delay, tracing},
  13316         isbn = {978-3-540-44345-2},
  13317         doi = {10.1007/3-540-45853-0},
  13318         url = {http://portal.acm.org/citation.cfm?id=699363},
  13319         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2002-08-esorics02-ipd-correlation.pdf},
  13320         author = {Xinyuan Wang and Douglas S. Reeves and S. Felix Wu}
  13321 }
  13322 @conference {morphmix:wpes2002,
  13323         title = {Introducing MorphMix: Peer-to-Peer based Anonymous Internet Usage with Collusion Detection},
  13324         booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2002)},
  13325         year = {2002},
  13326         month = {November},
  13327         publisher = {ACM  New York, NY, USA},
  13328         organization = {ACM  New York, NY, USA},
  13329         address = {Washington, DC, USA},
  13330         abstract = {Traditional mix-based systems are composed of a small set of static, well known, and highly reliable mixes. To resist traffic analysis attacks at a mix, cover traffic must be used, which results in significant bandwidth overhead. End-to-end traffic analysis attacks are even more difficult to counter because there are only a few entry-and exit-points in the system. Static mix networks also suffer from scalability problems and in several countries, institutions operating a mix could be targeted by legal attacks. In this paper, we introduce MorphMix, a system for peer-to-peer based anonymous Internet usage. Each MorphMix node is a mix and anyone can easily join the system. We believe that MorphMix overcomes or reduces several drawbacks of static mix networks. In particular, we argue that our approach offers good protection from traffic analysis attacks without employing cover traffic. But MorphMix also introduces new challenges. One is that an adversary can easily operate several malicious nodes in the system and try to break the anonymity of legitimate users by getting full control over their anonymous paths. To counter this attack, we have developed a collusion detection mechanism, which allows to identify compromised paths with high probability before they are being used},
  13331         www_section = {collusion detection, legal attack, P2P, traffic analysis},
  13332         isbn = {1-58113-633-1},
  13333         doi = {10.1145/644527.644537},
  13334         url = {http://portal.acm.org/citation.cfm?id=644537},
  13335         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-wpes2002.pdf},
  13336         author = {Marc Rennhard and Bernhard Plattner}
  13337 }
  13338 @booklet {Freedman02introducingtarzan,
  13339         title = {Introducing Tarzan, a Peer-to-Peer Anonymizing Network Layer},
  13340         journal = {Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13341         volume = {2429},
  13342         year = {2002},
  13343         pages = {121--129},
  13344         abstract = {We introduce Tarzan, a peer-to-peer anonymous network layer that provides generic IP forwarding. Unlike prior anonymizing layers, Tarzan is flexible, transparent, decentralized, and highly scalable. Tarzan achieves these properties by building anonymous IP tunnels between an open-ended set of peers. Tarzan can provide anonymity to existing applications, such as web browsing and file sharing, without change to those applications. Performance tests show that Tarzan imposes minimal overhead over a corresponding non-anonymous overlay route},
  13345         isbn = {3-540-44179-4},
  13346         url = {http://www.cs.rice.edu/Conferences/IPTPS02/182.pdf},
  13347         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tarzan.pdf},
  13348         author = {Michael J. Freedman and Emil Sit and Josh Cates and Robert Morris}
  13349 }
  13350 @booklet {646334,
  13351         title = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13352   author = {TODO},
  13353         year = {2002},
  13354         publisher = {Springer-Verlag},
  13355         address = {London, UK},
  13356         isbn = {3-540-44179-4},
  13357         url = {http://portal.acm.org/citation.cfm?id=646334$\#$},
  13358         editor = {Peter Druschel and Frans M. Kaashoek and Antony Rowstron}
  13359 }
  13360 @conference {Muthitacharoen02ivy:a,
  13361         title = {Ivy: A Read/Write Peer-to-Peer File System},
  13362         booktitle = {Proceedings of the 5th Symposium on Operating Systems Design and Implementation (OSDI 2002)},
  13363         year = {2002},
  13364         pages = {31--44},
  13365         abstract = {Ivy is a multi-user read/write peer-to-peer file system. Ivy has no centralized or dedicated components, and it provides useful integrity properties without requiring users to fully trust either the underlying peer-to-peer storage system or the other users of the file system},
  13366         www_section = {distributed storage, P2P},
  13367         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.20.2147},
  13368         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.20.2147.pdf},
  13369         author = {Muthitacharoen, Athicha and Robert Morris and Thomer M. Gil and Benjie Chen}
  13370 }
  13371 @conference {Maymounkov02kademlia:a,
  13372         title = {Kademlia: A Peer-to-peer Information System Based on the XOR Metric},
  13373         booktitle = {IPTPS '01--Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13374         series = {Lecture Notes in Computer Science},
  13375         volume = {2429},
  13376         year = {2002},
  13377         month = mar,
  13378         pages = {53--65},
  13379         publisher = {Springer-Verlag},
  13380         organization = {Springer-Verlag},
  13381         address = {Cambridge, MA, USA},
  13382         abstract = {We describe a peer-to-peer distributed hash table with provable consistency and performance in a fault-prone environment. Our system routes queries and locates nodes using a novel XOR-based metric topology that simplifies the algorithm and facilitates our proof. The topology has the property that every message exchanged conveys or reinforces useful contact information. The system exploits this information to send parallel, asynchronous query messages that tolerate node failures without imposing timeout delays on users},
  13383         www_section = {distributed hash table, fault-tolerance, Kademlia, P2P},
  13384         isbn = {3-540-44179-4},
  13385         doi = {10.1007/3-540-45748-8_5},
  13386         url = {http://www.springerlink.com/content/2ekx2a76ptwd24qt/},
  13387         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kpos_0.pdf},
  13388         author = {Petar Maymounkov and David Mazi{\`e}res}
  13389 }
  13390 @article {DBLP:journals/ijufks/Sweene02,
  13391         title = {k-Anonymity: A Model for Protecting Privacy},
  13392         journal = {International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems},
  13393         volume = {10},
  13394         number = {5},
  13395         year = {2002},
  13396         pages = {557--570},
  13397         author = {Latanya Sweeney}
  13398 }
  13399 @conference {Eschenauer02akey-management,
  13400         title = {A Key-Management Scheme for Distributed Sensor Networks},
  13401         booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  13402         year = {2002},
  13403         pages = {41--47},
  13404         publisher = {ACM Press},
  13405         organization = {ACM Press},
  13406         abstract = {Distributed Sensor Networks (DSNs) are ad-hoc mobile networks that include sensor nodes with limited computation and communication capabilities. DSNs are dynamic in the sense that they allow addition and deletion of sensor nodes after deployment to grow the network or replace failing and unreliable nodes. DSNs may be deployed in hostile areas where communication is monitored and nodes are subject to capture and surreptitious use by an adversary. Hence DSNs require cryptographic protection of communications, sensor-capture detection, key revocation and sensor disabling. In this paper, we present a key-management scheme designed to satisfy both operational and security requirements of DSNs},
  13407         www_section = {mobile Ad-hoc networks, sensor networks},
  13408         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.9193},
  13409         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9193.pdf},
  13410         author = {Laurent Eschenauer and Virgil D. Gligor}
  13411 }
  13412 @conference {limits-open,
  13413         title = {Limits of Anonymity in Open Environments},
  13414         booktitle = {Proceedings of Information Hiding Workshop (IH 2002)},
  13415         year = {2002},
  13416         month = {October},
  13417         publisher = {Springer-Verlag, LNCS 2578},
  13418         organization = {Springer-Verlag, LNCS 2578},
  13419         abstract = {A user is only anonymous within a set of other users. Hence, the core functionality of an anonymity providing technique is to establish an anonymity set. In open environments, such as the Internet, the established anonymity sets in the whole are observable and change with every anonymous communication. We use this fact of changing anonymity sets and present a model where we can determine the protection limit of an anonymity technique, i.e. the number of observations required for an attacker to break uniquely a given anonymity technique. In this paper, we use the popular MIX method to demonstrate our attack. The MIX method forms the basis of most of the today's deployments of anonymity services (e.g. Freedom, Onion Routing, Webmix). We note that our approach is general and can be applied equally well to other anonymity providing techniques},
  13420         www_section = {anonymity measurement, attack, mix},
  13421         url = {http://portal.acm.org/citation.cfm?id=731881},
  13422         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/limits-open.pdf},
  13423         author = {Dogan Kesdogan and Dakshi Agrawal and Stefan Penz},
  13424         editor = {Fabien Petitcolas}
  13425 }
  13426 @conference {Halevy:2002:LBE:646767.704291,
  13427         title = {The LSD Broadcast Encryption Scheme},
  13428         booktitle = {CRYPTO'02--Proceedings of the 22nd Annual International Cryptology Conference on Advances in Cryptology},
  13429         series = {Lecture Notes in Computer Science},
  13430         year = {2002},
  13431         month = aug,
  13432         pages = {47--60},
  13433         publisher = {Springer-Verlag},
  13434         organization = {Springer-Verlag},
  13435         address = {Santa Barbara, CA, USA},
  13436         abstract = {Broadcast Encryption schemes enable a center to broadcast encrypted programs so that only designated subsets of users can decrypt each program. The stateless variant of this problem provides each user with a fixed set of keys which is never updated. The best scheme published so far for this problem is the "subset difference" (SD) technique of Naor, Naor, and Lotspiech, in which each one of the n users is initially given O(log^2(n)) symmetric encryption keys. This allows the broadcaster to define at a later stage any subset of up to r users as "revoked", and to make the program accessible only to their complement by sending O(r) short messages before the encrypted program, and asking each user to perform an O(log(n)) computation. In this paper we describe the "Layered Subset Difference" (LSD) technique, which achieves the same goal with O(log^{1+\epsilon}(n)) keys, O(r) messages, and O(log(n)) computation. This reduces the number of keys given to each user by almost a square root factor without affecting the other parameters. In addition, we show how to use the same LSD keys in order to address any subset defined by a nested combination of inclusion and exclusion conditions with a number of messages which is proportional to the complexity of the description rather than to the size of the subset. The LSD scheme is truly practical, and makes it possible to broadcast an unlimited number of programs to 256,000,000 possible customers by giving each new customer a smart card with one kilobyte of tamper-resistant memory. It is then possible to address any subset defined by t nested inclusion and exclusion conditions by sending less than 4t short messages, and the scheme remains secure even if all the other users form an adversarial coalition},
  13437         www_section = {broadcast encryption scheme, encryption, LSD},
  13438         isbn = {3-540-44050-X},
  13439         url = {http://dl.acm.org/citation.cfm?id=646767.704291},
  13440         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2702\%20-\%20The\%20LSD\%20broadcast\%20encryption\%20scheme.pdf},
  13441         author = {Halevy, Dani and Shamir, Adi}
  13442 }
  13443 @article {10.1109/SFCS.2002.1181950,
  13444         title = {LT Codes},
  13445         journal = {Foundations of Computer Science, Annual IEEE Symposium on},
  13446         year = {2002},
  13447         pages = {0--271},
  13448         publisher = {IEEE Computer Society},
  13449         address = {Los Alamitos, CA, USA},
  13450         abstract = {We introduce LT codes, the first rateless erasure codes that are very efficient as the data length grows},
  13451         www_section = {coding theory},
  13452         isbn = {0-7695-1822-2},
  13453         issn = {0272-5428},
  13454         doi = {10.1109/SFCS.2002.1181950},
  13455         url = {http://www.computer.org/portal/web/csdl/abs/proceedings/focs/2002/1822/00/18220271abs.htm},
  13456         author = {Luby, Michael}
  13457 }
  13458 @conference {randomized-checking,
  13459         title = {Making mix nets robust for electronic voting by randomized partial checking},
  13460         booktitle = {Proceedings of the 11th USENIX Security Symposium},
  13461         year = {2002},
  13462         month = {August},
  13463         publisher = {USENIX Association  Berkeley, CA, USA},
  13464         organization = {USENIX Association  Berkeley, CA, USA},
  13465         abstract = { We propose a new technique for making mix nets robust, called randomized partial checking (RPC). The basic idea is that rather than providing a proof of completely correct operation, each server provides strong evidence of its correct operation by revealing a pseudo-randomly selected subset of its input/output relations.
  13466 Randomized partial checking is exceptionally efficient compared to previous proposals for providing robustness; the evidence provided at each layer is shorter than the output of that layer, and producing the evidence is easier than doing the mixing. It works with mix nets based on any encryption scheme (i.e., on public-key alone, and on hybrid schemes using public-key/symmetric-key combinations). It also works both with Chaumian mix nets where the messages are successively encrypted with each server's key, and with mix nets based on a single public key with randomized re-encryption at each layer.
  13467 Randomized partial checking is particularly well suited for voting systems, as it ensures voter privacy and provides assurance of correct operation. Voter privacy is ensured (either probabilistically or cryptographically) with appropriate design and parameter selection. Unlike previous work, our work provides voter privacy as a global property of the mix net rather than as a property ensured by a single honest server. RPC-based mix nets also provide high assurance of a correct election result, since a corrupt server is very likely to be caught if it attempts to tamper with even a couple of ballots},
  13468         www_section = {electronic voting, public verifiability, randomized partial checking, shuffle network},
  13469         isbn = {1-931971-00-5},
  13470         url = {http://portal.acm.org/citation.cfm?id=647253.720294},
  13471         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/randomized-checking.pdf},
  13472         author = {Jakobsson, Markus and Ari Juels and Ron Rivest}
  13473 }
  13474 @article {Thomas:2002:MAO:767821.769444,
  13475         title = {A Market-Based Approach to Optimal Resource Allocation in Integrated-Services Connection-Oriented Networks},
  13476         journal = {Operations Research},
  13477         volume = {50},
  13478         number = {4},
  13479         year = {2002},
  13480         month = jul,
  13481         pages = {603--616},
  13482         publisher = {INFORMS},
  13483         address = {Institute for Operations Research and the Management Sciences (INFORMS), Linthicum, Maryland, USA},
  13484         abstract = {We present an approach to the admission control and resource allocation problem in connection-oriented networks that offer multiple services to users. Users' preferences are summarized by means of their utility functions, and each user is allowed to request more than one type of service. Multiple types of resources are allocated at each link along the path of a connection. We assume that the relation between Quality of Service (QoS) and resource allocation is given, and we incorporate it as a constraint into a static optimization problem. The objective of the optimization problem is to determine the amount of and required resources for each type of service to maximize the sum of the users' utilities. We prove the existence of a solution of the optimization problem and describe a competitive market economy that implements the solution and satisfies the informational constraints imposed by the nature of the decentralized resource allocation problem. The economy consists of four different types of agents: resource providers, service providers, users, and an auctioneer that regulates the prices based on the observed aggregate excess demand. The goods that are sold are: (i) the resources at each link of the network, and (ii) services constructed from these resources and then delivered to users. We specify an iterative procedure that is used by the auctioneer to update the prices, and we show that it leads to an allocation that is arbitrarily close to a solution of the optimization problem in a finite number of iterations},
  13485         www_section = {algorithms, economics, integrated-services networks, network, nonlinear, pricing schemes, programming, resource allocation},
  13486         issn = {0030-364X},
  13487         doi = {10.1287/opre.50.4.603.2862},
  13488         url = {http://dx.doi.org/10.1287/opre.50.4.603.2862},
  13489         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Oper.\%20Res.\%20-\%20Optimal\%20Resource\%20Allocation.pdf},
  13490         author = {Thomas, Panagiotis and Teneketzis, Demosthenis and MacKie-Mason, Jeffrey K.}
  13491 }
  13492 @conference {Saroiu02ameasurement,
  13493         title = {A Measurement Study of Peer-to-Peer File Sharing Systems},
  13494         booktitle = {Multimedia Computing and Networking (MMCN)},
  13495         year = {2002},
  13496         month = jan,
  13497         address = {San Jose},
  13498         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.61.4223\&rep=rep1\&type=pdf},
  13499         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mmcn.pdf},
  13500         author = {Stefan Saroiu and P. Krishna Gummadi and Steven D. Gribble}
  13501 }
  13502 @booklet {dwork02memorybound,
  13503         title = {On memory-bound functions for fighting spam},
  13504         year = {2002},
  13505         abstract = {In 1992, Dwork and Naor proposed that e-mail messages be accompanied by easy-to-check proofs of computational effort in order to discourage junk e-mail, now known as spam. They proposed specific CPU-bound functions for this purpose. Burrows suggested that, since memory access speeds vary across machines much less than do CPU speeds, memory-bound functions may behave more equitably than CPU-bound functions; this approach was first explored by Abadi, Burrows, Manasse, and Wobber [5].
  13506 We further investigate this intriguing proposal. Specifically, we
  13507 1) Provide a formal model of computation and a statement of the problem;
  13508 2) Provide an abstract function and prove an asymptotically tight amortized lower bound on the number of memory accesses required to compute an acceptable proof of effort; specifically, we prove that, on average, the sender of a message must perform many unrelated accesses to memory, while the receiver, in order to verify the work, has to perform significantly fewer accesses;
  13509 3) Propose a concrete instantiation of our abstract function, inspired by the RC4 stream cipher;
  13510 4) Describe techniques to permit the receiver to verify the computation with no memory accesses;
  13511 5) Give experimental results showing that our concrete memory-bound function is only about four times slower on a 233 MHz settop box than on a 3.06 GHz workstation, and that speedup of the function is limited even if an adversary knows the access sequence and uses optimal off-line cache replacement},
  13512         doi = {10.1007/b11817},
  13513         url = {http://citeseer.ist.psu.edu/dwork02memorybound.html},
  13514         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/memory-bound-crypto.pdf},
  13515         author = {Cynthia Dwork and Andrew Goldberg and Moni Naor}
  13516 }
  13517 @book {2002_3,
  13518         title = {Mnemosyne: Peer-to-Peer Steganographic Storage},
  13519         booktitle = {Peer-to-Peer Systems},
  13520         series = {Lecture Notes in Computer Science},
  13521         volume = {2429},
  13522         year = {2002},
  13523         pages = {130--140},
  13524         publisher = {Springer Berlin Heidelberg},
  13525         organization = {Springer Berlin Heidelberg},
  13526         isbn = {978-3-540-44179-3},
  13527         doi = {10.1007/3-540-45748-8_13},
  13528         url = {http://dx.doi.org/10.1007/3-540-45748-8_13},
  13529         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/107.pdf},
  13530         author = {Hand, Steven and Roscoe, Timothy},
  13531         editor = {Druschel, Peter and Kaashoek, Frans and Rowstron, Antony}
  13532 }
  13533 @booklet {Maymounkov02onlinecodes,
  13534         title = {Online codes (Extended Abstract)},
  13535         year = {2002},
  13536         abstract = {We introduce online codes -- a class of near-optimal codes for a very general loss channel which we call the free channel. Online codes are linear encoding/decoding time codes, based on sparse bipartite graphs, similar to Tornado codes, with a couple of novel properties: local encodability and rateless-ness. Local encodability is the property that each block of the encoding of a message can be computed independently from the others in constant time. This also implies that each encoding block is only dependent on a constant-sized part of the message and a few preprocessed bits. Rateless-ness is the property that each message has an encoding of practically infinite size. We argue that rateless codes are more appropriate than fixed-rate codes for most situations where erasure codes were considered a solution. Furthermore, rateless codes meet new areas of application, where they are not replaceable by fixed-rate codes. One such area is information dispersal over peer-to-peer networks},
  13537         www_section = {coding theory, local encodability, rateless-ness, sparse bipartite graphs},
  13538         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.112.1333},
  13539         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.112.1333.pdf},
  13540         author = {Petar Maymounkov}
  13541 }
  13542 @article {Sherwood_p5:a,
  13543         title = {P5: A Protocol for Scalable Anonymous Communication},
  13544         journal = {Journal of Computer Security},
  13545         volume = {13},
  13546         year = {2002},
  13547         month = dec,
  13548         pages = {839--876},
  13549         publisher = {IOS Press  Amsterdam, The Netherlands},
  13550         abstract = {We present a protocol for anonymous communication over the Internet. Our protocol, called P5 (Peer-to-Peer Personal Privacy Protocol), provides sender-, receiver-, and sender-receiver anonymity. P5 is designed to be implemented over the current Internet protocols, and does not require any special infrastructure support. A novel feature of P5 is that it allows individual participants to trade-off degree of anonymity for communication efficiency, and hence can be used to scalably implement large anonymous groups. We present a description of P5, an analysis of its anonymity and communication efficiency, and evaluate its performance using detailed packet-level simulations},
  13551         url = {http://www.cs.umd.edu/projects/p5/p5.pdf},
  13552         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p5.pdf},
  13553         author = {Rob Sherwood and Bobby Bhattacharjee and Aravind Srinivasan}
  13554 }
  13555 @booklet {Cox02pastiche:making,
  13556         title = {Pastiche: Making Backup Cheap and Easy},
  13557         year = {2002},
  13558         abstract = {Backup is cumbersome and expensive. Individual users almost never back up their data, and backup is a significant cost in large organizations. This paper presents Pastiche, a simple and inexpensive backup system. Pastiche exploits excess disk capacity to perform peer-to-peer backup with no administrative costs. Each node minimizes storage overhead by selecting peers that share a significant amount of data. It is easy for common installations to find suitable peers, and peers with high overlap can be identified with only hundreds of bytes. Pastiche provides mechanisms for confidentiality, integrity, and detection of failed or malicious peers. A Pastiche prototype suffers only 7.4\% overhead for a modified Andrew Benchmark, and restore performance is comparable to cross-machine copy},
  13559         www_section = {backup, P2P},
  13560         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.15.3254},
  13561         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.15.3254.pdf},
  13562         author = {Landon P. Cox and Christopher D. Murray and Brian D. Noble}
  13563 }
  13564 @conference {513828,
  13565         title = {Performance analysis of the CONFIDANT protocol},
  13566         booktitle = {MobiHoc '02: Proceedings of the 3rd ACM international symposium on Mobile ad hoc networking \& computing},
  13567         year = {2002},
  13568         pages = {226--236},
  13569         publisher = {ACM},
  13570         organization = {ACM},
  13571         address = {New York, NY, USA},
  13572         abstract = {Mobile ad-hoc networking works properly only if the participating nodes cooperate in routing and forwarding. However, it may be advantageous for individual nodes not to cooperate. We propose a protocol, called CONFIDANT, for making misbehavior unattractive; it is based on selective altruism and utilitarianism. It aims at detecting and isolating misbehaving nodes, thus making it unattractive to deny cooperation. Trust relationships and routing decisions are based on experienced, observed, or reported routing and forwarding behavior of other nodes. The detailed implementation of CONFIDANT in this paper assumes that the network layer is based on the Dynamic Source Routing (DSR) protocol. We present a performance analysis of DSR fortified by CONFIDANT and compare it to regular defenseless DSR. It shows that a network with CONFIDANT and up to 60\% of misbehaving nodes behaves almost as well as a benign network, in sharp contrast to a defenseless network. All simulations have been implemented and performed in GloMoSim},
  13573         www_section = {cooperation, fairness, mobile Ad-hoc networks, reputation, robustness, routing, trust},
  13574         isbn = {1-58113-501-7},
  13575         doi = {10.1145/513800.513828},
  13576         url = {http://portal.acm.org/citation.cfm?id=513828$\#$},
  13577         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BucheggerL02.pdf},
  13578         author = {Sonja Buchegger and Jean-Yves Le Boudec}
  13579 }
  13580 @booklet {Minsky02practicalset,
  13581         title = {Practical Set Reconciliation},
  13582         year = {2002},
  13583         www_section = {set reconciliation},
  13584         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/practical.pdf},
  13585         author = {Yaron Minsky and Ari Trachtenberg}
  13586 }
  13587 @conference {fiveyearslater,
  13588         title = {Privacy-enhancing technologies for the Internet, II: Five years later},
  13589         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)},
  13590         year = {2002},
  13591         month = {April},
  13592         publisher = {Springer-Verlag, LNCS 2482},
  13593         organization = {Springer-Verlag, LNCS 2482},
  13594         abstract = {Five years ago, {\textquotedblleft}Privacy-enhancing technologies for the Internet{\textquotedblright} [23] examined the state of the then newly emerging privacy-enhancing technologies. In this survey paper, we look back at the last five years to see what has changed, what has stagnated, what has succeeded, what has failed, and why. We also look at current trends with a view towards the future},
  13595         www_section = {privacy},
  13596         isbn = {978-3-540-00565-0},
  13597         doi = {10.1007/3-540-36467-6},
  13598         url = {http://www.springerlink.com/content/740p21gl5a9f640m/},
  13599         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/petfive.pdf},
  13600         author = {Ian Goldberg},
  13601         editor = {Roger Dingledine and Paul Syverson}
  13602 }
  13603 @conference {c.rhea:probabilistic,
  13604         title = {Probabilistic Location and Routing},
  13605         booktitle = {INFOCOM'02. Proceedings of the 21st Annual Joint Conference of the IEEE Computer and Communications Societies},
  13606         year = {2002},
  13607         month = jun,
  13608         pages = {-1--1},
  13609         publisher = {IEEE Computer Society},
  13610         organization = {IEEE Computer Society},
  13611         address = {New York, NY, USA},
  13612         abstract = {We propose probabilistic location to enhance the performance of existing peer-to-peer location mechanisms in the case where a replica for the queried data item exists close to the query source. We introduce the attenuated Bloom filter, a lossy distributed index data structure. We describe how to use these data structures for document location and how to maintain them despite document motion. We include a detailed performance study which indicates that our algorithm performs as desired, both finding closer replicas and finding them faster than deterministic algorithms alone},
  13613         www_section = {Bloom filter, document location, document motion, probabilistic location},
  13614         isbn = {0-7803-7476-2},
  13615         doi = {10.1109/INFCOM.2002.1019375},
  13616         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2702\%20-\%20Probabilistic\%20location\%20and\%20routing.pdf},
  13617         author = {Rhea, Sean C. and John Kubiatowicz}
  13618 }
  13619 @conference {586136,
  13620         title = {Query-flood DoS attacks in Gnutella},
  13621         booktitle = {CCS '02: Proceedings of the 9th ACM conference on Computer and communications security},
  13622         year = {2002},
  13623         pages = {181--192},
  13624         publisher = {ACM},
  13625         organization = {ACM},
  13626         address = {New York, NY, USA},
  13627         abstract = {We describe a simple but effective traffic model that can be used to understand the effects of denial-of-service (DoS) attacks based on query floods in Gnutella networks. We run simulations based on the model to analyze how different choices of network topology and application level load balancing policies can minimize the effect of these types of DoS attacks. In addition, we also study how damage caused by query floods is distributed throughout the network, and how application-level policies can localize the damage},
  13628         www_section = {denial-of-service, P2P},
  13629         isbn = {1-58113-612-9},
  13630         doi = {10.1145/586110.586136},
  13631         url = {http://portal.acm.org/citation.cfm?id=586110.586136$\#$},
  13632         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p115-daswani_0.pdf},
  13633         author = {Daswani, Neil and Hector Garcia-Molina}
  13634 }
  13635 @conference {Douceur:2002:RSD:850928.851884,
  13636         title = {Reclaiming Space from Duplicate Files in a Serverless Distributed File System},
  13637         booktitle = {ICDCS'02--Proceedings of the 22nd International Conference on Distributed Computing Systems (ICDCS'02)},
  13638         series = {ICDCS '02},
  13639         year = {2002},
  13640         month = jul,
  13641         pages = {0--617},
  13642         publisher = {IEEE Computer Society},
  13643         organization = {IEEE Computer Society},
  13644         address = {Vienna, Austria},
  13645         abstract = {The Farsite distributed file system provides availability by replicating each file onto multiple desktop computers. Since this replication consumes significant storage space, it is important to reclaim used space where possible. Measurement of over 500 desktop file systems shows that nearly half of all consumed space is occupied by duplicate files. We present a mechanism to reclaim space from this incidental duplication to make it available for controlled file replication. Our mechanism includes: (1) convergent encryption, which enables duplicate files to be coalesced into the space of a single file, even if the files are encrypted with different users' keys; and (2) SALAD, a Self-Arranging Lossy Associative Database for aggregating file content and location information in a decentralized, scalable, fault-tolerant manner. Large-scale simulation experiments show that the duplicate-file coalescing system is scalable, highly effective, and fault-tolerant},
  13646         www_section = {convergent encryption, distributed file system, duplicate files, farsite, SALAD, serverless},
  13647         isbn = {0-7695-1585-1},
  13648         doi = {10.1109/ICDCS.2002.1022312},
  13649         url = {http://dl.acm.org/citation.cfm?id=850928.851884},
  13650         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2702\%20-\%20Reclaiming\%20space\%20for\%20duplicate\%20files.pdf},
  13651         author = {John R. Douceur and Adya, Atul and Bolosky, William J. and Simon, Dan and Marvin Theimer}
  13652 }
  13653 @conference {Dingledine02reliablemix,
  13654         title = {Reliable MIX Cascade Networks through Reputation},
  13655         booktitle = {Financial Cryptography. Springer-Verlag, LNCS 2357},
  13656         year = {2002},
  13657         publisher = {Springer Verlag},
  13658         organization = {Springer Verlag},
  13659         abstract = {We describe a MIX cascade protocol and a reputation system that together increase the reliability of a network of MIX cascades. In our protocol, MIX nodes periodically generate a communally random seed that, along with their reputations, determines cascade configuration},
  13660         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9316\&rep=rep1\&type=pdf},
  13661         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9316.pdf},
  13662         author = {Roger Dingledine and Paul Syverson}
  13663 }
  13664 @conference {casc-rep,
  13665         title = {Reliable MIX Cascade Networks through Reputation},
  13666         booktitle = {Proceedings of Financial Cryptography (FC '02)},
  13667         year = {2002},
  13668         month = mar,
  13669         publisher = {Springer-Verlag, LNCS 2357},
  13670         organization = {Springer-Verlag, LNCS 2357},
  13671         abstract = {We describe a MIX cascade protocol and a reputation system that together increase the reliability of a network of MIX cascades. In our protocol, MIX nodes periodically generate a communally random seed that, along with their reputations, determines cascade configuration. Nodes send test messages to monitor their cascades. Senders can also demonstrate message decryptions to convince honest cascade members that a cascade is misbehaving. By allowing any node to declare the failure of its own cascade, we eliminate the need for global trusted witnesses},
  13672         www_section = {anonymity, communal randomness, P2P, reputation},
  13673         isbn = {978-3-540-00646-6},
  13674         doi = {10.1007/3-540-36504-4},
  13675         url = {http://www.springerlink.com/content/g67u25lm80234qj4/},
  13676         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/casc-rep.pdf},
  13677         author = {Roger Dingledine and Paul Syverson},
  13678         editor = {Matt Blaze}
  13679 }
  13680 @conference {Cohen02replicationstrategies,
  13681         title = {Replication Strategies in Unstructured Peer-to-Peer Networks},
  13682         booktitle = {Proceedings of the 2002 SIGCOMM conference},
  13683         volume = {32, Issue 4},
  13684         year = {2002},
  13685         month = oct,
  13686         pages = {177--190},
  13687         publisher = {ACM  New York, NY, USA},
  13688         organization = {ACM  New York, NY, USA},
  13689         address = {Pittsburgh},
  13690         abstract = {The Peer-to-Peer (P2P) architectures that are most prevalent in today's Internet are decentralized and unstructured. Search is blind in that it is independent of the query and is thus not more effective than probing randomly chosen peers. One technique to improve the effectiveness of blind search is to proactively replicate data},
  13691         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9873\&rep=rep1\&type=pdf},
  13692         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/replication.pdf},
  13693         author = {Edith Cohen and Scott Shenker}
  13694 }
  13695 @conference {Damiani02areputation-based_0,
  13696         title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer Networks},
  13697         booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  13698         year = {2002},
  13699         pages = {207--216},
  13700         publisher = {ACM Press},
  13701         organization = {ACM Press},
  13702         abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently introduced P2P services have reached tens of millions of users. A feature that significantly contributes to the success of many P2P applications is user anonymity. However, anonymity opens the door to possible misuses and abuses, exploiting the P2P network as a way to spread tampered with resources, including Trojan Horses, viruses, and spam. To address this problem we propose a self-regulating system where the P2P network is used to implement a robust reputation mechanism. Reputation sharing is realized through a distributed polling algorithm by which resource requestors can assess the reliability of a resource offered by a participant before initiating the download. This way, spreading of malicious contents will be reduced and eventually blocked. Our approach can be straightforwardly piggybacked on existing P2P protocols and requires modest modifications to current implementations},
  13703         url = {http://seclab.dti.unimi.it/Papers/ccs02.ps},
  13704         author = {Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and Pierangela Samarati and Fabio Violante}
  13705 }
  13706 @conference {Damiani02areputation-based,
  13707         title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer Networks},
  13708         booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  13709         year = {2002},
  13710         pages = {207--216},
  13711         publisher = {ACM Press},
  13712         organization = {ACM Press},
  13713         abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently introduced P2P services have reached tens of millions of users. A feature that significantly contributes to the success of many P2P applications is user anonymity. However, anonymity opens the door to possible misuses and abuses, exploiting the P2P network as a way to spread tampered with resources, including Trojan Horses, viruses, and spam. To address this problem we propose a self-regulating system where the P2P network is used to implement a robust reputation mechanism. Reputation sharing is realized through a distributed polling algorithm by which resource requestors can assess the reliability of a resource offered by a participant before initiating the download. This way, spreading of malicious contents will be reduced and eventually blocked. Our approach can be straightforwardly piggybacked on existing P2P protocols and requires modest modifications to current implementations},
  13714         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.1784\&rep=rep1\&type=pdf},
  13715         author = {Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and Pierangela Samarati and Fabio Violante}
  13716 }
  13717 @conference {beimel-robust,
  13718         title = {Robust information-theoretic private information retrieval},
  13719         booktitle = {Proceedings of the 3rd Conference on Security in Communication Networks},
  13720         series = {Lecture Notes in Computer Science},
  13721         volume = {2576},
  13722         year = {2002},
  13723         pages = {326--341},
  13724         publisher = {Springer-Verlag},
  13725         organization = {Springer-Verlag},
  13726         abstract = {A Private Information Retrieval (PIR) protocol allows a user to retrieve a data item of its choice from a database, such that the servers storing the database do not gain information on the identity of the item being retrieved. PIR protocols were studied in depth since the subject was introduced in Chor, Goldreich, Kushilevitz, and Sudan 1995. The standard definition of PIR protocols raises a simple question--what happens if some of the servers crash during the operation? How can we devise a protocol which still works in the presence of crashing servers? Current systems do not guarantee availability of servers at all times for many reasons, e.g., crash of server or communication problems. Our purpose is to design robust PIR protocols, i.e., protocols which still work correctly even if only k out of l servers are available during the protocols' operation (the user does not know in advance which servers are available). We present various robust PIR protocols giving different tradeoffs between the different parameters. These protocols are incomparable, i.e., for different values of n and k we will get better results using different protocols. We first present a generic transformation from regular PIR protocols to robust PIR protocols, this transformation is important since any improvement in the communication complexity of regular PIR protocol will immediately implicate improvement in the robust PIR protocol communication. We also present two specific robust PIR protocols. Finally, we present robust PIR protocols which can tolerate Byzantine servers, i.e., robust PIR protocols which still work in the presence of malicious servers or servers with corrupted or obsolete databases},
  13727         www_section = {obsolete database, private information retrieval, robustness},
  13728         isbn = {978-3-540-00420-2},
  13729         doi = {10.1007/3-540-36413-7},
  13730         url = {http://www.springerlink.com/content/9bnlbf2e2lp9u9p4/},
  13731         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BS.pdf},
  13732         author = {Amos Beimel and Yoav Stahl}
  13733 }
  13734 @conference {633045,
  13735         title = {Scalable application layer multicast},
  13736         booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications, technologies, architectures, and protocols for computer communications},
  13737         year = {2002},
  13738         pages = {205--217},
  13739         publisher = {ACM},
  13740         organization = {ACM},
  13741         address = {New York, NY, USA},
  13742         abstract = {We describe a new scalable application-layer multicast protocol, specifically designed for low-bandwidth, data streaming applications with large receiver sets. Our scheme is based upon a hierarchical clustering of the application-layer multicast peers and can support a number of different data delivery trees with desirable properties. We present extensive simulations of both our protocol and the Narada application-layer multicast protocol over Internet-like topologies. Our results show that for groups of size 32 or more, our protocol has lower link stress (by about 25\%), improved or similar end-to-end latencies and similar failure recovery properties. More importantly, it is able to achieve these results by using orders of magnitude lower control traffic. Finally, we present results from our wide-area testbed in which we experimented with 32-100 member groups distributed over 8 different sites. In our experiments, average group members established and maintained low-latency paths and incurred a maximum packet loss rate of less than 1\% as members randomly joined and left the multicast group. The average control overhead during our experiments was less than 1 Kbps for groups of size 100},
  13743         www_section = {application layer multicast, hierarchy, overlay networks, P2P, scalability},
  13744         isbn = {1-58113-570-X},
  13745         doi = {10.1145/633025.633045},
  13746         url = {http://portal.acm.org/citation.cfm?id=633045$\#$},
  13747         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sigcomm02.pdf},
  13748         author = {Banerjee, Suman and Bobby Bhattacharjee and Kommareddy, Christopher}
  13749 }
  13750 @phdthesis {937250,
  13751         title = {A scalable content-addressable network},
  13752         year = {2002},
  13753         note = {Chair-Shenker, Scott and Chair-Stoica, Ion},
  13754         school = {University of California, Berkeley},
  13755         type = {phd},
  13756         www_section = {CAN, distributed hash table},
  13757         url = {http://www.icir.org/sylvia/thesis.ps},
  13758         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can.pdf},
  13759         author = {Sylvia Paul Ratnasamy}
  13760 }
  13761 @article {Castro02scribe:a,
  13762         title = {SCRIBE: A large-scale and decentralized application-level multicast infrastructure},
  13763         journal = {IEEE Journal on Selected Areas in Communications (JSAC)},
  13764         volume = {20},
  13765         year = {2002},
  13766         pages = {0--2002},
  13767         abstract = {This paper presents Scribe, a scalable application-level multicast infrastructure. Scribe supports large numbers of groups, with a potentially large number of members per group. Scribe is built on top of Pastry, a generic peer-to-peer object location and routing substrate overlayed on the Internet, and leverages Pastry's reliability, self-organization, and locality properties. Pastry is used to create and manage groups and to build efficient multicast trees for the dissemination of messages to each group. Scribe provides best-effort reliability guarantees, but we outline how an application can extend Scribe to provide stronger reliability. Simulation results, based on a realistic network topology model, show that Scribe scales across a wide range of groups and group sizes. Also, it balances the load on the nodes while achieving acceptable delay and link stress when compared to IP multicast},
  13768         www_section = {distributed hash table, multicast, Scribe},
  13769         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.299\&rep=rep1\&type=pdf},
  13770         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac.pdf},
  13771         author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron}
  13772 }
  13773 @conference {Douceur:2002:SDS:784592.784803,
  13774         title = {A Secure Directory Service based on Exclusive Encryption},
  13775         booktitle = {ACSAC'02--Proceedings of the 18th Annual Computer Security Applications Conference},
  13776         series = {ACSAC '02},
  13777         year = {2002},
  13778         month = dec,
  13779         pages = {0--172},
  13780         publisher = {IEEE Computer Society},
  13781         organization = {IEEE Computer Society},
  13782         address = {San Diego, CA, USA},
  13783         abstract = {We describe the design of a Windows file-system directory service that ensures the persistence, integrity, privacy, syntactic legality, and case-insensitive uniqueness of the names it indexes. Byzantine state replication provides persistence and integrity, and encryption imparts privacy. To enforce Windows' baroque name syntax--including restrictions on allowable characters, on the terminal character, and on several specific names--we develop a cryptographic process, called "exclusive encryption," that inherently excludes syntactically illegal names and that enables the exclusion of case-insensitively duplicate names without access to their plaintext. This process excludes entire names by mapping the set of allowed strings to the set of all strings, excludes certain characters through an amended prefix encoding, excludes terminal characters through varying the prefix coding by character index, and supports case-insensitive comparison of names by extracting and encrypting case information separately. We also address the issues of hiding name-length information and access-authorization information, and we report a newly discovered problem with enforcing case-insensitive uniqueness for Unicode names},
  13784         www_section = {directory service, encryption, exclusive encryption, Windows},
  13785         isbn = {0-7695-1828-1},
  13786         doi = {10.1109/CSAC.2002.1176289},
  13787         url = {http://dl.acm.org/citation.cfm?id=784592.784803},
  13788         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2702\%20-\%20A\%20secure\%20directory\%20service\%20based\%20on\%20exclusive\%20encryption.pdf},
  13789         author = {John R. Douceur and Adya, Atul and Benaloh, Josh and Bolosky, William J. and Yuval, Gideon}
  13790 }
  13791 @article {844156,
  13792         title = {Secure routing for structured peer-to-peer overlay networks},
  13793         journal = {SIGOPS Oper. Syst. Rev.},
  13794         volume = {36},
  13795         number = {SI},
  13796         year = {2002},
  13797         pages = {299--314},
  13798         publisher = {ACM},
  13799         address = {New York, NY, USA},
  13800         abstract = {Structured peer-to-peer overlay networks provide a substrate for the construction of large-scale, decentralized applications, including distributed storage, group communication, and content distribution. These overlays are highly resilient; they can route messages correctly even when a large fraction of the nodes crash or the network partitions. But current overlays are not secure; even a small fraction of malicious nodes can prevent correct message delivery throughout the overlay. This problem is particularly serious in open peer-to-peer systems, where many diverse, autonomous parties without preexisting trust relationships wish to pool their resources. This paper studies attacks aimed at preventing correct message delivery in structured peer-to-peer overlays and presents defenses to these attacks. We describe and evaluate techniques that allow nodes to join the overlay, to maintain routing state, and to forward messages securely in the presence of malicious nodes},
  13801         www_section = {P2P, resilient overlay network},
  13802         issn = {0163-5980},
  13803         doi = {10.1145/844128.844156},
  13804         url = {http://portal.acm.org/citation.cfm?id=844156$\#$},
  13805         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/osdi2002.pdf},
  13806         author = {Miguel Castro and Peter Druschel and Ganesh, Ayalvadi and Antony Rowstron and Dan S. Wallach}
  13807 }
  13808 @conference {Karlof02securerouting,
  13809         title = {Secure Routing in Wireless Sensor Networks: Attacks and Countermeasures},
  13810         booktitle = {First IEEE International Workshop on Sensor Network Protocols and Applications},
  13811         year = {2002},
  13812         pages = {113--127},
  13813         abstract = {We consider routing security in wireless sensor networks. Many sensor network routing protocols have been proposed, but none of them have been designed with security as a goal. We propose security goals for routing in sensor networks, show how attacks against ad-hoc and peer-to-peer networks can be adapted into powerful attacks against sensor networks, introduce two classes of novel attacks against sensor networks --- sinkholes and HELLO floods, and analyze the security of all the major sensor network routing protocols. We describe crippling attacks against all of them and suggest countermeasures and design considerations. This is the first such analysis of secure routing in sensor networks},
  13814         www_section = {ad-hoc networks, P2P, sensor networks},
  13815         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.4672},
  13816         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sensor-route-security_0.pdf},
  13817         author = {Chris Karlof and David Wagner}
  13818 }
  13819 @conference {687810,
  13820         title = {Security Considerations for Peer-to-Peer Distributed Hash Tables},
  13821         booktitle = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13822         year = {2002},
  13823         pages = {261--269},
  13824         publisher = {Springer-Verlag},
  13825         organization = {Springer-Verlag},
  13826         address = {London, UK},
  13827         abstract = {Recent peer-to-peer research has focused on providing efficient hash lookup systems that can be used to build more complex systems. These systems have good properties when their algorithms are executed correctly but have not generally considered how to handle misbehaving nodes. This paper looks at what sorts of security problems are inherent in large peer-to-peer systems based on distributed hash lookup systems. We examine the types of problems that such systems might face, drawing examples from existing systems, and propose some design principles for detecting and preventing these problems},
  13828         www_section = {distributed hash table, P2P},
  13829         isbn = {3-540-44179-4},
  13830         url = {http://portal.acm.org/citation.cfm?id=687810$\#$},
  13831         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/173.pdf},
  13832         author = {Emil Sit and Robert Morris}
  13833 }
  13834 @conference {camenisch2002ssep,
  13835         title = {A Signature Scheme with Efficient Protocols},
  13836         booktitle = {Proceedings of SCN '02, Third Conference on Security in Communication Networks},
  13837         year = {2002},
  13838         pages = {268--289},
  13839         publisher = {Springer Verlag, LNCS 2576},
  13840         organization = {Springer Verlag, LNCS 2576},
  13841         abstract = {Digital signature schemes are a fundamental cryptographic primitive, of use both in its own right, and as a building block in cryptographic protocol design. In this paper, we propose a practical and provably secure signature scheme and show protocols (1) for issuing a signature on a committed value (so the signer has no information about the signed value), and (2) for proving knowledge of a signature on a committed value. This signature scheme and corresponding protocols are a building block for the design of anonymity-enhancing cryptographic systems, such as electronic cash, group signatures, and anonymous credential systems. The security of our signature scheme and protocols relies on the Strong RSA assumption. These results are a generalization of the anonymous credential system of Camenisch and Lysyanskaya},
  13842         www_section = {anonymity, anonymous credential system, digital signature},
  13843         isbn = {978-3-540-00420-2},
  13844         doi = {10.1007/3-540-36413-7},
  13845         url = {http://www.springerlink.com/content/r66ywt172y06g5qr/},
  13846         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2002ssep.pdf},
  13847         author = {Jan Camenisch and Anna Lysyanskaya}
  13848 }
  13849 @conference {Byers02simpleload,
  13850         title = {Simple Load Balancing for Distributed Hash Tables},
  13851         booktitle = {Simple Load Balancing for Distributed Hash Tables},
  13852         year = {2002},
  13853         pages = {80--87},
  13854         abstract = {Distributed hash tables have recently become a useful building block for a variety of distributed applications. However, current schemes based upon consistent hashing require both considerable implementation complexity and substantial storage overhead to achieve desired load balancing goals. We argue in this paper that these goals can be achieved more simply and more cost-effectively. First, we suggest the direct application of the power of two choices paradigm, whereby an item is stored at the less loaded of two (or more) random alternatives. We then consider how associating a small constant number of hash values with a key can naturally be extended to support other load balancing strategies, including load-stealing or load-shedding, as well as providing natural fault-tolerance mechanisms},
  13855         www_section = {distributed hash table, load balancing},
  13856         doi = {10.1007/b11823},
  13857         url = {http://www.springerlink.com/content/r9r4qcqxc2bmfqmr/},
  13858         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.277.pdf},
  13859         author = {Byers, John W. and Jeffrey Considine and Michael Mitzenmacher}
  13860 }
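% The Byers-Considine-Mitzenmacher entry above builds on the "power of two
% choices" paradigm: hash a key with d salts and store the item on the least
% loaded of the d candidate nodes. A hypothetical Python sketch of that idea
% (node layout, salting and helper names are invented for illustration and are
% not the paper's exact scheme):

import hashlib

def candidate_nodes(key, node_ids, d=2):
    """Derive d candidate node ids by hashing the key with d different salts."""
    cands = []
    for salt in range(d):
        digest = hashlib.sha1(f"{salt}:{key}".encode()).hexdigest()
        cands.append(node_ids[int(digest, 16) % len(node_ids)])
    return cands

def store(key, value, nodes, d=2):
    """Place the item on the least loaded of its d candidates; return that id."""
    cands = candidate_nodes(key, sorted(nodes), d)
    target = min(cands, key=lambda nid: len(nodes[nid]))
    nodes[target][key] = value
    return target

def lookup(key, nodes, d=2):
    """A reader probes all d candidates, since any of them may hold the item."""
    for nid in candidate_nodes(key, sorted(nodes), d):
        if key in nodes[nid]:
            return nodes[nid][key]
    return None

if __name__ == "__main__":
    nodes = {nid: {} for nid in range(8)}
    for i in range(1000):
        store(f"item-{i}", "payload", nodes)
    print(sorted(len(v) for v in nodes.values()))   # visibly flatter than one choice

% With d = 2 the classic result is that the maximum load exceeds the average by
% only O(log log n) rather than the O(log n / log log n) of a single hash choice.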
  13861 @conference {Capkun02smallworlds,
  13862         title = {Small Worlds in Security Systems: an Analysis of the PGP Certificate Graph},
  13863         booktitle = {Proceedings of the ACM New Security Paradigms Workshop},
  13864         year = {2002},
  13865         pages = {28--35},
  13866         publisher = {ACM Press},
  13867         organization = {ACM Press},
  13868         abstract = {We propose a new approach to securing self-organized mobile ad hoc networks. In this approach, security is achieved in a fully self-organized manner; by this we mean that the security system does not require any kind of certification authority or centralized server, even for the initialization phase. In our work, we were inspired by PGP [15] because its operation relies solely on the acquaintances between users. We show that the small-world phenomenon naturally emerges in the PGP system as a consequence of the self-organization of users. We show this by studying the PGP certificate graph properties and by quantifying its small-world characteristics. We argue that the certificate graphs of self-organized security systems will exhibit a similar small-world phenomenon, and we provide a way to model self-organized certificate graphs. The results of the PGP certificate graph analysis and graph modelling can be used to build new self-organized security systems and to test the performance of the existing proposals. In this work, we refer to such an example},
  13869         www_section = {PGP, public key management, self-organization, small-world},
  13870         isbn = {1-58113-598-X},
  13871         doi = {10.1145/844102.844108},
  13872         url = {http://portal.acm.org/citation.cfm?id=844102.844108},
  13873         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.5408.pdf},
  13874         author = {Srdan Capkun and Levente Butty{\'a}n and Jean-Pierre Hubaux}
  13875 }
  13876 @article {567178,
  13877         title = {A State-of-the-Art Survey on Software Merging},
  13878         journal = {IEEE Trans. Softw. Eng},
  13879         volume = {28},
  13880         number = {5},
  13881         year = {2002},
  13882         pages = {449--462},
  13883         publisher = {IEEE Press},
  13884         address = {Piscataway, NJ, USA},
  13885         abstract = {Software merging is an essential aspect of the maintenance and evolution of large-scale software systems. This paper provides a comprehensive survey and analysis of available merge approaches. Over the years, a wide variety of different merge techniques has been proposed. While initial techniques were purely based on textual merging, more powerful approaches also take the syntax and semantics of the software into account. There is a tendency towards operation-based merging because of its increased expressiveness. Another tendency is to try to define merge techniques that are as general, accurate, scalable, and customizable as possible, so that they can be used in any phase in the software life-cycle and detect as many conflicts as possible. After comparing the possible merge techniques, we suggest a number of important open problems and future research directions},
  13886         www_section = {conflict detection, large-scale software development, merge conflicts, software merging},
  13887         issn = {0098-5589},
  13888         doi = {10.1109/TSE.2002.1000449},
  13889         url = {http://portal.acm.org/citation.cfm?id=567178$\#$},
  13890         author = {Mens, Tom}
  13891 }
  13892 @conference {TrafHTTP,
  13893         title = {Statistical Identification of Encrypted Web Browsing Traffic},
  13894         booktitle = {Proceedings of the 2002 IEEE Symposium on Security and Privacy},
  13895         year = {2002},
  13896         month = {May},
  13897         publisher = {IEEE Computer Society  Washington, DC, USA},
  13898         organization = {IEEE Computer Society  Washington, DC, USA},
  13899         address = {Berkeley, California},
  13900         abstract = {Encryption is often proposed as a tool for protecting the privacy of World Wide Web browsing. However, encryption--particularly as typically implemented in, or in concert with, popular Web browsers--does not hide all information about the encrypted plaintext. Specifically, HTTP object count and sizes are often revealed (or at least incompletely concealed). We investigate the identifiability of World Wide Web traffic based on this unconcealed information in a large sample of Web pages, and show that it suffices to identify a significant fraction of them quite reliably. We also suggest some possible countermeasures against the exposure of this kind of information and experimentally evaluate their effectiveness},
  13901         www_section = {encryption, privacy},
  13902         isbn = {0-7695-1543-6},
  13903         url = {http://portal.acm.org/citation.cfm?id=830535},
  13904         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tr-2002-23.pdf},
  13905         author = {Qixiang Sun and Daniel R. Simon and Yi-Min Wang and Wilf Russell and Venkata N. Padmanabhan and Lili Qiu}
  13906 }
  13907 @booklet {Montenegro02statisticallyunique,
  13908         title = {Statistically Unique and Cryptographically Verifiable (SUCV) Identifiers and Addresses},
  13909         year = {2002},
  13910         abstract = {This paper addresses the identifier ownership problem. It does so by using characteristics of Statistic Uniqueness and Cryptographic Verifiability (SUCV) of certain entities which this document calls SUCV Identifiers and Addresses. Their characteristics allow them to severely limit certain classes of denial of service attacks and hijacking attacks. SUCV addresses are particularly applicable to solve the address ownership problem that hinders mechanisms like Binding Updates in Mobile IPv6},
  13911         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.16.1456},
  13912         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.1456.pdf},
  13913         author = {Gabriel Montenegro}
  13914 }
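% The SUCV entry above binds an address to a key pair by deriving the address
% from a hash of the public key, so that ownership claims (e.g. Mobile IPv6
% binding updates) can be verified with a signature instead of a PKI. A
% hypothetical Python sketch of the derivation (the hash, bit lengths and
% formatting below are illustrative, not the paper's exact encoding):

import hashlib

def sucv_interface_id(public_key: bytes) -> str:
    """Derive a 64-bit, IPv6-style interface identifier from a public key."""
    digest = hashlib.sha256(public_key).digest()
    iid = digest[:8]                      # statistically unique with high probability
    return ":".join(iid[i:i + 2].hex() for i in range(0, 8, 2))

if __name__ == "__main__":
    pk = b"example public key material"   # placeholder for real key bytes
    print(sucv_interface_id(pk))
    # A correspondent re-hashes the presented key and compares it with the
    # claimed address; hijacking the address then requires finding a collision.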
  13915 @conference {wallach02p2psecurity,
  13916         title = {A Survey of Peer-to-Peer Security Issues},
  13917         booktitle = {ISSS},
  13918         year = {2002},
  13919         pages = {42--57},
  13920         abstract = { Peer-to-peer (p2p) networking technologies have gained popularity as a mechanism for users to share files without the need for centralized servers. A p2p network provides a scalable and fault-tolerant mechanism to locate nodes anywhere on a network without maintaining a large amount of routing state. This allows for a variety of applications beyond simple file sharing. Examples include multicast systems, anonymous communications systems, and web caches. We survey security issues that occur in the underlying p2p routing protocols, as well as fairness and trust issues that occur in file sharing and other p2p applications. We discuss how techniques, ranging from cryptography, to random network probing, to economic incentives, can be used to address these problems},
  13921         www_section = {cryptography, P2P, routing, security policy},
  13922         url = {http://springerlink.metapress.com/openurl.asp?genre=article\&issn=0302-9743\&volume=2609\&spage=42},
  13923         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.84.9197.pdf},
  13924         author = {Dan S. Wallach}
  13925 }
  13926 @article {568525,
  13927         title = {A survey of rollback-recovery protocols in message-passing systems},
  13928         journal = {ACM Comput. Surv},
  13929         volume = {34},
  13930         number = {3},
  13931         year = {2002},
  13932         pages = {375--408},
  13933         publisher = {ACM},
  13934         address = {New York, NY, USA},
  13935         abstract = {This survey covers rollback-recovery techniques that do not require special language constructs. In the first part of the survey we classify rollback-recovery protocols into checkpoint-based and log-based. Checkpoint-based protocols rely solely on checkpointing for system state restoration. Checkpointing can be coordinated, uncoordinated, or communication-induced. Log-based protocols combine checkpointing with logging of nondeterministic events, encoded in tuples called determinants. Depending on how determinants are logged, log-based protocols can be pessimistic, optimistic, or causal. Throughout the survey, we highlight the research issues that are at the core of rollback-recovery and present the solutions that currently address them. We also compare the performance of different rollback-recovery protocols with respect to a series of desirable properties and discuss the issues that arise in the practical implementations of these protocols },
  13936         www_section = {message logging, rollback-recovery},
  13937         issn = {0360-0300},
  13938         doi = {10.1145/568522.568525},
  13939         url = {http://portal.acm.org/citation.cfm?id=568522.568525$\#$},
  13940         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CMU-CS-99-148.pdf},
  13941         author = {Mootaz Elnozahy and Lorenzo Alvisi and Yi-Min Wang and Johnson, David B.}
  13942 }
  13943 @conference {Douceur:2002:SA:646334.687813,
  13944         title = {The Sybil Attack},
  13945         booktitle = {IPTPS'01--Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13946         series = {Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  13947         year = {2002},
  13948         month = mar,
  13949         pages = {251--260},
  13950         publisher = {Springer-Verlag London},
  13951         organization = {Springer-Verlag London},
  13952         address = {Cambridge, MA},
  13953         abstract = {Large-scale peer-to-peer systems face security threats from faulty or hostile remote computing elements. To resist these threats, many such systems employ redundancy. However, if a single faulty entity can present multiple identities, it can control a substantial fraction of the system, thereby undermining this redundancy. One approach to preventing these "Sybil attacks" is to have a trusted agency certify identities. This paper shows that, without a logically centralized authority, Sybil attacks are always possible except under extreme and unrealistic assumptions of resource parity and coordination among entities},
  13954         www_section = {attack, peer-to-peer networking, security threat, Sybil attack},
  13955         isbn = {3-540-44179-4},
  13956         url = {http://dl.acm.org/citation.cfm?id=646334.687813},
  13957         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2702\%20-\%20Douceur\%20-\%20The\%20Sybil\%20Attack.pdf},
  13958         author = {John R. Douceur}
  13959 }
  13960 @conference {tarzan:ccs02,
  13961         title = {Tarzan: A Peer-to-Peer Anonymizing Network Layer},
  13962         booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security (CCS 2002)},
  13963         year = {2002},
  13964         month = {November},
  13965         publisher = {ACM  New York, NY, USA},
  13966         organization = {ACM  New York, NY, USA},
  13967         address = {Washington, DC},
  13968         abstract = {Tarzan is a peer-to-peer anonymous IP network overlay. Because it provides IP service, Tarzan is general-purpose and transparent to applications. Organized as a decentralized peer-to-peer overlay, Tarzan is fault-tolerant, highly scalable, and easy to manage. Tarzan achieves its anonymity with layered encryption and multi-hop routing, much like a Chaumian mix. A message initiator chooses a path of peers pseudo-randomly through a restricted topology in a way that adversaries cannot easily influence. Cover traffic prevents a global observer from using traffic analysis to identify an initiator. Protocols toward unbiased peer-selection offer new directions for distributing trust among untrusted entities. Tarzan provides anonymity to either clients or servers, without requiring that both participate. In both cases, Tarzan uses a network address translator (NAT) to bridge between Tarzan hosts and oblivious Internet hosts. Measurements show that Tarzan imposes minimal overhead over a corresponding non-anonymous overlay route},
  13969         www_section = {fault-tolerance, overhead, P2P},
  13970         isbn = {1-58113-612-9},
  13971         doi = {10.1145/586110.586137},
  13972         url = {http://portal.acm.org/citation.cfm?id=586137},
  13973         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tarzan-ccs02.pdf},
  13974         author = {Michael J. Freedman and Robert Morris}
  13975 }
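% Tarzan's "layered encryption and multi-hop routing, much like a Chaumian mix"
% can be illustrated with a toy onion: the sender wraps the payload once per
% relay, and each relay peels exactly one layer. The sketch below uses the
% pyca/cryptography Fernet recipe only as a stand-in symmetric cipher; Tarzan's
% real key exchange, padding and cover traffic are not modelled.

from cryptography.fernet import Fernet

relay_keys = [Fernet.generate_key() for _ in range(3)]    # one key per hop

def wrap(payload: bytes, keys) -> bytes:
    """Encrypt for the last hop first so each relay can remove exactly one layer."""
    for key in reversed(keys):
        payload = Fernet(key).encrypt(payload)
    return payload

def peel(onion: bytes, key: bytes) -> bytes:
    """What a single relay does: strip its own layer and forward the rest."""
    return Fernet(key).decrypt(onion)

if __name__ == "__main__":
    onion = wrap(b"GET / HTTP/1.0", relay_keys)
    for key in relay_keys:                 # relays process the onion in path order
        onion = peel(onion, key)
    print(onion)                           # -> b'GET / HTTP/1.0' at the exit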
  13976 @article {Clifton:2002:TPP:772862.772867,
  13977         title = {Tools for privacy preserving distributed data mining},
  13978         journal = {SIGKDD Explorations Newsletter},
  13979         volume = {4},
  13980         number = {2},
  13981         year = {2002},
  13982         month = dec,
  13983         pages = {28--34},
  13984         publisher = {ACM},
  13985         address = {New York, NY, USA},
  13986         abstract = {Privacy preserving mining of distributed data has numerous applications. Each application poses different constraints: What is meant by privacy, what are the desired results, how is the data distributed, what are the constraints on collaboration and cooperative computing, etc. We suggest that the solution to this is a toolkit of components that can be combined for specific privacy-preserving data mining applications. This paper presents some components of such a toolkit, and shows how they can be used to solve several privacy-preserving data mining problems},
  13987         www_section = {PIR, privacy, security},
  13988         issn = {1931-0145},
  13989         doi = {10.1145/772862.772867},
  13990         url = {http://doi.acm.org/10.1145/772862.772867},
  13991         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGKDD\%20Explor.\%20Newsl.\%20-\%20Distributed\%20data\%20mining.pdf},
  13992         author = {Clifton, Chris and Kantarcioglu, Murat and Vaidya, Jaideep and Lin, Xiaodong and Zhu, Michael Y.}
  13993 }
  13994 @booklet {Serjantov02towardsan,
  13995         title = {Towards an Information Theoretic Metric for Anonymity},
  13996         journal = {Lecture Notes in Computer Science},
  13997         volume = {2482/2003},
  13998         year = {2002},
  13999         pages = {41--53},
  14000         publisher = {Springer-Verlag},
  14001         abstract = {In this paper we look closely at the popular metric of anonymity, the anonymity set, and point out a number of problems associated with it. We then propose an alternative information theoretic measure of anonymity which takes into account the probabilities of users sending and receiving the messages and show how to calculate it for a message in a standard mix-based anonymity system. We also use our metric to compare a pool mix to a traditional threshold mix, which was impossible using anonymity sets. We also show how the maximum route length restriction which exists in some fielded anonymity systems can lead to the attacker performing more powerful traffic analysis. Finally, we discuss open problems and future work on anonymity measurements},
  14002         isbn = {978-3-540-00565-0 },
  14003         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.12.5992\&rep=rep1\&type=url\&i=0},
  14004         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/set.dvi_.pdf},
  14005         author = {Andrei Serjantov and George Danezis}
  14006 }
  14007 @conference {Serj02,
  14008         title = {Towards an Information Theoretic Metric for Anonymity},
  14009         booktitle = {Proceedings of Privacy Enhancing Technologies Workshop (PET 2002)},
  14010         year = {2002},
  14011         month = apr,
  14012         publisher = {Springer-Verlag, LNCS 2482},
  14013         organization = {Springer-Verlag, LNCS 2482},
  14014         abstract = {In this paper we look closely at the popular metric of anonymity, the anonymity set, and point out a number of problems associated with it. We then propose an alternative information theoretic measure of anonymity which takes into account the probabilities of users sending and receiving the messages and show how to calculate it for a message in a standard mix-based anonymity system. We also use our metric to compare a pool mix to a traditional threshold mix, which was impossible using anonymity sets. We also show how the maximum route length restriction which exists in some fielded anonymity systems can lead to the attacker performing more powerful traffic analysis. Finally, we discuss open problems and future work on anonymity measurements},
  14015         www_section = {anonymity measurement, traffic analysis},
  14016         isbn = {978-3-540-00565-0},
  14017         doi = {10.1007/3-540-36467-6},
  14018         url = {http://www.springerlink.com/content/wwe2c7g3hmwn0klf/},
  14019         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.5992.pdf},
  14020         author = {Andrei Serjantov and George Danezis},
  14021         editor = {Roger Dingledine and Paul Syverson}
  14022 }
  14023 @booklet {Diaz02towardsmeasuring,
  14024         title = {Towards Measuring Anonymity},
  14025         year = {2002},
  14026         publisher = {Springer-Verlag},
  14027         abstract = {This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other},
  14028         url = {http://www.cosic.esat.kuleuven.be/publications/article-89.pdf},
  14029         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/anonimity.pdf},
  14030         author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel}
  14031 }
  14032 @conference {Diaz02,
  14033         title = {Towards measuring anonymity},
  14034         booktitle = {Proceedings of Privacy Enhancing Technologies Workshop (PET 2002)},
  14035         year = {2002},
  14036         month = apr,
  14037         publisher = {Springer-Verlag, LNCS 2482},
  14038         organization = {Springer-Verlag, LNCS 2482},
  14039         abstract = {This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other},
  14040         www_section = {anonymity, attack, privacy},
  14041         isbn = {978-3-540-00565-0},
  14042         doi = {10.1007/3-540-36467-6},
  14043         url = {http://www.springerlink.com/content/3qb837jkpgukc6b5/},
  14044         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/article-89.pdf},
  14045         author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel},
  14046         editor = {Roger Dingledine and Paul Syverson}
  14047 }
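% Both metrics above (Serjantov-Danezis and Diaz et al.) measure anonymity as
% the entropy of the attacker's probability distribution over possible senders;
% Diaz et al. additionally normalise by the maximum entropy log2(N). A small
% Python illustration with a made-up distribution:

import math

def entropy(probs):
    """Shannon entropy H(X) in bits; the effective anonymity-set size metric."""
    return -sum(p * math.log2(p) for p in probs if p > 0)

def degree_of_anonymity(probs):
    """Normalised degree d = H(X) / log2(N); d = 1 means the users are indistinguishable."""
    h_max = math.log2(len(probs))
    return entropy(probs) / h_max if h_max > 0 else 0.0

if __name__ == "__main__":
    # Attacker's view after observing a mix round: one user looks far more likely.
    suspicion = [0.5, 0.25, 0.125, 0.0625, 0.0625]
    print(f"H(X) = {entropy(suspicion):.3f} bits")          # 1.875 bits
    print(f"d    = {degree_of_anonymity(suspicion):.3f}")   # ~0.807; uniform would give 1.0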
  14048 @conference {633027,
  14049         title = {Understanding BGP misconfiguration},
  14050         booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications, technologies, architectures, and protocols for computer communications},
  14051         year = {2002},
  14052         pages = {3--16},
  14053         publisher = {ACM},
  14054         organization = {ACM},
  14055         address = {New York, NY, USA},
  14056         abstract = {It is well-known that simple, accidental BGP configuration errors can disrupt Internet connectivity. Yet little is known about the frequency of misconfiguration or its causes, except for the few spectacular incidents of widespread outages. In this paper, we present the first quantitative study of BGP misconfiguration. Over a three-week period, we analyzed routing table advertisements from 23 vantage points across the Internet backbone to detect incidents of misconfiguration. For each incident we polled the ISP operators involved to verify whether it was a misconfiguration, and to learn the cause of the incident. We also actively probed the Internet to determine the impact of misconfiguration on connectivity. Surprisingly, we find that configuration errors are pervasive, with 200-1200 prefixes (0.2-1.0\% of the BGP table size) suffering from misconfiguration each day. Close to 3 in 4 of all new prefix advertisements were results of misconfiguration. Fortunately, the connectivity seen by end users is surprisingly robust to misconfigurations. While misconfigurations can substantially increase the update load on routers, only one in twenty-five affects connectivity. While the causes of misconfiguration are diverse, we argue that most could be prevented through better router design},
  14057         www_section = {border gateway protocol},
  14058         isbn = {1-58113-570-X},
  14059         doi = {10.1145/633025.633027},
  14060         url = {http://portal.acm.org/citation.cfm?id=633027$\#$},
  14061         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bgpmisconfig.pdf},
  14062         author = {Mahajan, Ratul and Wetherall, David and Anderson, Thomas}
  14063 }
  14064 @conference {kesdogan:pet2002,
  14065         title = {Unobservable Surfing on the World Wide Web: Is Private Information Retrieval an alternative to the MIX based Approach?},
  14066         booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)},
  14067         year = {2002},
  14068         month = {April},
  14069         publisher = {Springer-Verlag, LNCS 2482},
  14070         organization = {Springer-Verlag, LNCS 2482},
  14071         abstract = {The technique Private Information Retrieval (PIR) perfectly protects a user's access pattern to a database. An attacker cannot observe (or determine) which data element is requested by a user and so cannot deduce the interest of the user. We discuss the application of PIR on the World Wide Web and compare it to the MIX approach. We demonstrate particularly that in this context the method does not provide perfect security, and we give a mathematical model for the amount of information an attacker could obtain. We provide an extension of the method under which perfect security can still be achieved},
  14072         www_section = {private information retrieval},
  14073         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.80.7678},
  14074         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PIR_Kesdogan.pdf},
  14075         author = {Dogan Kesdogan and Mark Borning and Michael Schmeink},
  14076         editor = {Roger Dingledine and Paul Syverson}
  14077 }
  14078 @conference {651321,
  14079         title = {Venti: A New Approach to Archival Storage},
  14080         booktitle = {FAST '02: Proceedings of the Conference on File and Storage Technologies},
  14081         year = {2002},
  14082         pages = {89--101},
  14083         publisher = {USENIX Association},
  14084         organization = {USENIX Association},
  14085         address = {Berkeley, CA, USA},
  14086         abstract = {This paper describes a network storage system, called Venti, intended for archival data. In this system, a unique hash of a block's contents acts as the block identifier for read and write operations. This approach enforces a write-once policy, preventing accidental or malicious destruction of data. In addition, duplicate copies of a block can be coalesced, reducing the consumption of storage and simplifying the implementation of clients. Venti is a building block for constructing a variety of storage applications such as logical backup, physical backup, and snapshot file systems},
  14087         www_section = {backup, file systems, network storage},
  14088         isbn = {1-880446-03-0},
  14089         url = {http://portal.acm.org/citation.cfm?id=651321$\#$},
  14090         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/venti-fast.pdf},
  14091         author = {Quinlan, Sean and Dorward, Sean}
  14092 }
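% Venti's central idea, per the abstract above, is that a block's address (its
% "score") is a hash of its contents, which makes the store write-once and
% deduplicating by construction. A toy in-memory sketch of that behaviour (real
% Venti is a networked archival server and uses SHA-1 scores; SHA-256 and the
% class below are illustrative only):

import hashlib

class BlockStore:
    def __init__(self):
        self._blocks = {}

    def write(self, data: bytes) -> str:
        """Store a block; its address is the hash of its contents."""
        score = hashlib.sha256(data).hexdigest()
        self._blocks.setdefault(score, data)   # identical blocks coalesce
        return score

    def read(self, score: str) -> bytes:
        data = self._blocks[score]
        # The address commits to the contents, so a block can never be
        # silently overwritten or tampered with in place.
        assert hashlib.sha256(data).hexdigest() == score
        return data

if __name__ == "__main__":
    store = BlockStore()
    a = store.write(b"archival data")
    b = store.write(b"archival data")          # duplicate write is a no-op
    print(a == b, store.read(a))               # -> True b'archival data'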
  14093 @conference {571857,
  14094         title = {Viceroy: a scalable and dynamic emulation of the butterfly},
  14095         booktitle = {PODC '02: Proceedings of the twenty-first annual symposium on Principles of distributed computing},
  14096         year = {2002},
  14097         pages = {183--192},
  14098         publisher = {ACM},
  14099         organization = {ACM},
  14100         address = {New York, NY, USA},
  14101         abstract = {We propose a family of constant-degree routing networks of logarithmic diameter, with the additional property that the addition or removal of a node to the network requires no global coordination, only a constant number of linkage changes in expectation, and a logarithmic number with high probability. Our randomized construction improves upon existing solutions, such as balanced search trees, by ensuring that the congestion of the network is always within a logarithmic factor of the optimum with high probability. Our construction derives from recent advances in the study of peer-to-peer lookup networks, where rapid changes require efficient and distributed maintenance, and where the lookup efficiency is impacted both by the lengths of paths to requested data and the presence or elimination of bottlenecks in the network},
  14102         www_section = {P2P},
  14103         isbn = {1-58113-485-1},
  14104         doi = {10.1145/571825.571857},
  14105         url = {http://portal.acm.org/citation.cfm?id=571857$\#$},
  14106         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/viceroy.pdf},
  14107         author = {Malkhi, Dahlia and Moni Naor and Ratajczak, David}
  14108 }
  14109 @booklet {Hall01onalgorithms,
  14110         title = {On Algorithms for Efficient Data Migration},
  14111         year = {2001},
  14112         abstract = {The data migration problem is the problem of computing an efficient plan for moving data stored on devices in a network from one configuration to another. Load balancing or changing usage patterns could necessitate such a rearrangement of data. In this paper, we consider the case where the objects are fixed-size and the network is complete. The direct migration problem is closely related to edge-coloring. However, because there are space constraints on the devices, the problem is more complex. Our main results are polynomial time algorithms for finding a near-optimal migration plan in the presence of space constraints when a certain number of additional nodes is available as temporary storage, and a 3/2-approximation for the case where data must be migrated directly to its destination},
  14113         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.26.1365\&rep=rep1\&type=pdf},
  14114         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.26.1365.pdf},
  14115         author = {Joseph Hall and Jason D. Hartline and Anna R. Karlin and Jared Saia and John Wilkes}
  14116 }
  14117 @conference {Wright01ananalysis,
  14118         title = {An Analysis of the Degradation of Anonymous Protocols},
  14119         booktitle = {Network and Distributed System Security Symposium},
  14120         year = {2001},
  14121         address = {San Diego, California},
  14122         abstract = {There have been a number of protocols proposed for anonymous network communication. In this paper we prove that when a particular initiator continues communication with a particular responder across path reformations, existing protocols are subject to attacks by corrupt group members that degrade the anonymity of each protocol over time. We use this result to place an upper bound on how long existing protocols including Crowds, Onion Routing, Hordes, and DC-Net, can maintain anonymity in the face of the attacks described. Our results show that fully-connected DC-Net is the most resilient to these attacks, but is subject to simple denial-of-service attacks. Additionally, we show how a variant of the attack allows attackers to setup other participants to falsely appear to be the initiator of a connection},
  14123         url = {http://freehaven.net/anonbib/cache/wright02.pdf},
  14124         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright02.pdf},
  14125         author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}
  14126 }
  14127 @conference {747491,
  14128         title = {Application-Level Multicast Using Content-Addressable Networks},
  14129         booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on Networked Group Communication},
  14130         year = {2001},
  14131         pages = {14--29},
  14132         publisher = {Springer-Verlag},
  14133         organization = {Springer-Verlag},
  14134         address = {London, UK},
  14135         abstract = {Most currently proposed solutions to application-level multicast organise the group members into an application-level mesh over which a Distance-Vector routing protocol, or a similar algorithm, is used to construct source-rooted distribution trees. The use of a global routing protocol limits the scalability of these systems. Other proposed solutions that scale to larger numbers of receivers do so by restricting the multicast service model to be single-sourced. In this paper, we propose an application-level multicast scheme capable of scaling to large group sizes without restricting the service model to a single source. Our scheme builds on recent work on Content-Addressable Networks (CANs). Extending the CAN framework to support multicast comes at trivial additional cost and, because of the structured nature of CAN topologies, obviates the need for a multicast routing algorithm. Given the deployment of a distributed infrastructure such as a CAN, we believe our CAN-based multicast scheme offers the dual advantages of simplicity and scalability},
  14136         www_section = {CAN, mesh networks},
  14137         isbn = {3-540-42824-0},
  14138         doi = {10.1007/3-540-45546-9},
  14139         url = {http://www.springerlink.com/content/ahdgfj8yj9exqe03/},
  14140         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can-mcast.pdf},
  14141         author = {Sylvia Paul Ratnasamy and Handley, Mark and Richard Karp and S Shenker}
  14142 }
  14143 @conference {gup,
  14144         title = {Authentic Attributes with Fine-Grained Anonymity Protection},
  14145         booktitle = {Proceedings of Financial Cryptography (FC 2000)},
  14146         year = {2001},
  14147         pages = {276--294},
  14148         publisher = {Springer-Verlag, LNCS 1962},
  14149         organization = {Springer-Verlag, LNCS 1962},
  14150         abstract = {Collecting accurate profile information and protecting an individual's privacy are ordinarily viewed as being at odds. This paper presents mechanisms that protect individual privacy while presenting accurate-indeed authenticated-profile information to servers and merchants. In particular, we give a pseudonym registration scheme and system that enforces unique user registration while separating trust required of registrars, issuers, and validators. This scheme enables the issuance of global unique pseudonyms (GUPs) and attributes enabling practical applications such as authentication of accurate attributes and enforcement of {\textquotedblleft}one-to-a-customer{\textquotedblright} properties.
  14151 We also present a scheme resilient to even pseudonymous profiling yet preserving the ability of merchants to authenticate the accuracy of information. It is the first mechanism of which the authors are aware to guarantee recent validity for group signatures, and more generally multi-group signatures, thus effectively enabling revocation of all or some of the multi-group certificates held by a principal},
  14152         www_section = {privacy, pseudonym},
  14153         isbn = {978-3-540-42700-1},
  14154         doi = {10.1007/3-540-45472-1},
  14155         url = {http://portal.acm.org/citation.cfm?id=728483},
  14156         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gup.pdf},
  14157         author = {Stuart Stubblebine and Paul Syverson},
  14158         editor = {Yair Frankel}
  14159 }
  14160 @article {2001_0,
  14161         title = {Automated Negotiation: Prospects, Methods and Challenges},
  14162         journal = {Group Decision and Negotiation},
  14163         volume = {10},
  14164         year = {2001},
  14165         month = mar,
  14166         pages = {199--215},
  14167         abstract = {This paper is to examine the space of negotiation opportunities for autonomous agents, to identify and evaluate some of the key techniques, and to highlight some of the major challenges for future automated negotiation research. This paper is not meant as a survey of the field of automated negotiation. Rather, the descriptions and assessments of the various approaches are generally undertaken with particular reference to work in which the authors have been involved. However, the specific issues raised should be viewed as being broadly applicable},
  14168         www_section = {automated negotiation, autonomous agents, negotiation},
  14169         doi = {10.1023},
  14170         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Group\%20Decision\%20and\%20Negociation\%20-\%20Automated\%20Negociation.pdf},
  14171         author = {Nicholas R Jennings and Peyman Faratin and Alessio R. Lomuscio and Simon Parsons and Carles Sierra and Michael Wooldridge}
  14172 }
  14173 @conference {378347,
  14174         title = {Bayeux: an architecture for scalable and fault-tolerant wide-area data dissemination},
  14175         booktitle = {NOSSDAV '01: Proceedings of the 11th international workshop on Network and operating systems support for digital audio and video},
  14176         year = {2001},
  14177         pages = {11--20},
  14178         publisher = {ACM},
  14179         organization = {ACM},
  14180         address = {New York, NY, USA},
  14181         abstract = {The demand for streaming multimedia applications is growing at an incredible rate. In this paper, we propose Bayeux, an efficient application-level multicast system that scales to arbitrarily large receiver groups while tolerating failures in routers and network links. Bayeux also includes specific mechanisms for load-balancing across replicate root nodes and more efficient bandwidth consumption. Our simulation results indicate that Bayeux maintains these properties while keeping transmission overhead low. To achieve these properties, Bayeux leverages the architecture of Tapestry, a fault-tolerant, wide-area overlay routing and location network},
  14182         www_section = {fault-tolerance, load balancing},
  14183         isbn = {1-58113-370-7},
  14184         doi = {10.1145/378344.378347},
  14185         url = {http://portal.acm.org/citation.cfm?id=378347$\#$},
  14186         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bayeux.pdf},
  14187         author = {Shelley Zhuang and Ben Y. Zhao and Anthony D. Joseph and Katz, Randy H. and John Kubiatowicz}
  14188 }
  14189 @article {Beimel01busesfor,
  14190         title = {Buses for Anonymous Message Delivery},
  14191         journal = {Journal of Cryptology},
  14192         volume = {16},
  14193         year = {2001},
  14194         pages = {0--2003},
  14195         abstract = {Applies graph theory to anonymity. The paper suffers from the fundamental problem that it does not discuss attacks on the scheme, and there are a couple of pretty basic ways to break anonymity. Also, the scheme uses lots of traffic; some variants end up looking much like a pipenet },
  14196         url = {http://gecko.cs.purdue.edu/gnet/papers/BD.pdf  },
  14197         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BD.pdf},
  14198         author = {Amos Beimel and Shlomi Dolev}
  14199 }
  14200 @conference {Stoica01chord:a,
  14201         title = {Chord: A Scalable Peer-to-Peer Lookup Service for Internet Applications},
  14202         booktitle = {Proceedings of the 2001 conference on Applications, technologies, architectures, and protocols for computer communications},
  14203         year = {2001},
  14204         month = jan,
  14205         pages = {149--160},
  14206         publisher = { ACM  New York, NY, USA},
  14207         organization = { ACM  New York, NY, USA},
  14208         address = {San Diego, California, United States },
  14209         abstract = {Efficiently determining the node that stores a data item in a distributed network is an important and challenging problem. This paper describes the motivation and design of the Chord system, a decentralized lookup service that stores key/value pairs for such networks. The Chord protocol takes as input an m-bit identifier (derived by hashing a higher-level application specific key), and returns the node that stores the value corresponding to that key. Each Chord node is identified by an m-bit identifier and each node stores the key identifiers in the system closest to the node's identifier. Each node maintains an m-entry routing table that allows it to look up keys efficiently. Results from theoretical analysis, simulations, and experiments show that Chord is incrementally scalable, with insertion and lookup costs scaling logarithmically with the number of Chord nodes},
  14210         www_section = {Chord, distributed hash table},
  14211         isbn = {1-58113-411-8},
  14212         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chord_sigcomm.pdf},
  14213         author = {Ion Stoica and Robert Morris and David Karger and Frans M. Kaashoek and Hari Balakrishnan}
  14214 }
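% The Chord entry above maps m-bit keys to nodes on an identifier circle and
% keeps O(log N) "finger" pointers per node so that lookups take O(log N) hops.
% A hypothetical, centralised Python simulation of that lookup (a real
% deployment is distributed and maintains fingers incrementally; the ring size
% and node identifiers here are invented for illustration):

M = 6                                   # identifier bits (tiny, for illustration)
RING = 2 ** M

def in_interval(x, a, b):
    """True if x lies in the half-open ring interval (a, b]."""
    return (a < x <= b) if a < b else (x > a or x <= b)

class Node:
    def __init__(self, nid, ring):
        self.id, self.ring = nid, ring

    def successor(self, k):
        """First node identifier clockwise from k (inclusive)."""
        ids = sorted(n.id for n in self.ring)
        return next((i for i in ids if i >= k), ids[0])

    def fingers(self):
        """Finger i points at successor(self.id + 2**i)."""
        return [self.successor((self.id + 2 ** i) % RING) for i in range(M)]

    def lookup(self, key, hops=0):
        succ = self.successor((self.id + 1) % RING)
        if in_interval(key, self.id, succ):
            return succ, hops            # our successor owns the key
        for fid in reversed(self.fingers()):
            if fid != self.id and in_interval(fid, self.id, (key - 1) % RING):
                nxt = next(n for n in self.ring if n.id == fid)
                return nxt.lookup(key, hops + 1)   # forward to closest preceding finger
        return succ, hops + 1

if __name__ == "__main__":
    ring = []
    ring.extend(Node(i, ring) for i in (1, 8, 14, 21, 32, 38, 42, 48, 51, 56))
    print(ring[0].lookup(54))            # -> (56, 3): owner reached in a few hops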
  14215 @article {Sirer_cliquenet:a,
  14216         title = {CliqueNet: A Self-Organizing, Scalable, Peer-to-Peer Anonymous Communication Substrate},
  14217         year = {2001},
  14218         institution = {Cornell},
  14219         address = {Ithaca},
  14220         abstract = {Anonymity is critical for many networked applications. Yet current Internet protocols provide no support for masking the identity of communication endpoints. This paper outlines a design for a peer-to-peer, scalable, tamper-resilient communication protocol that provides strong anonymity and privacy. Called CliqueNet, our protocol provides an information-theoretic guarantee: an omnipotent adversary that can wiretap at any location in the network cannot determine the sender of a packet beyond a clique, that is, a set of k hosts, where k is an anonymizing factor chosen by the participants. CliqueNet is resilient to jamming by malicious hosts and can scale with the number of participants. This paper motivates the need for an anonymous communication layer and describes the self-organizing, novel divide-and-conquer approach that enables CliqueNet to scale while offering a strong anonymity guarantee. CliqueNet is widely applicable as a communication substrate for peer-to-peer applications that require anonymity, privacy and anti-censorship guarantees},
  14221         www_section = {anonymity, CliqueNet, DC-network},
  14222         journal = {unknown},
  14223         issn = {TR2001},
  14224         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.4785\&rep=rep1\&type=url\&i=0},
  14225         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cliquenet-iptp.pdf},
  14226         author = {Emin G{\"u}n Sirer and Milo Polte and Mark Robson}
  14227 }
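% CliqueNet's information-theoretic guarantee comes from running DC-nets
% ("dining cryptographers") inside each clique: every pair of members shares a
% random pad, everyone broadcasts the XOR of their pads, and only the sender
% additionally XORs in the message, so the combined broadcast reveals the
% message but not its origin. A toy single round in Python (pads are generated
% locally here purely for illustration; in practice they are shared pairwise
% out of band):

import secrets

def dc_round(num_parties, sender, message, nbytes=4):
    """One DC-net round: returns the XOR of all broadcasts, i.e. the message."""
    pads = {(i, j): int.from_bytes(secrets.token_bytes(nbytes), "big")
            for i in range(num_parties) for j in range(i + 1, num_parties)}
    broadcasts = []
    for i in range(num_parties):
        out = 0
        for (a, b), pad in pads.items():
            if i in (a, b):
                out ^= pad               # every pad appears in exactly two outputs
        if i == sender:
            out ^= message               # only the sender folds in the message
        broadcasts.append(out)
    combined = 0
    for out in broadcasts:
        combined ^= out                  # pads cancel pairwise; the message remains
    return combined

if __name__ == "__main__":
    print(hex(dc_round(num_parties=5, sender=2, message=0xCAFE)))   # -> 0xcafe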
  14228 @conference {Douceur01competitivehill-climbing,
  14229         title = {Competitive Hill-Climbing Strategies for Replica Placement in a Distributed File System},
  14230         booktitle = {In DISC},
  14231         year = {2001},
  14232         pages = {48--62},
  14233         abstract = {The Farsite distributed file system stores multiple replicas of files on multiple machines, to provide file access even when some machines are unavailable. Farsite assigns file replicas to machines so as to maximally exploit the different degrees of availability of different machines, given an allowable replication factor R. We use competitive analysis and simulation to study the performance of three candidate hillclimbing replica placement strategies, MinMax, MinRand, and RandRand, each of which successively exchanges the locations of two file replicas. We show that the MinRand and RandRand strategies are perfectly competitive for R = 2 and 2/3-competitive for R = 3. For general R, MinRand is at least 1/2-competitive and RandRand is at least 10/17-competitive. The MinMax strategy is not competitive. Simulation results show better performance than the theoretic worst-case bounds},
  14234         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.6802\&rep=rep1\&type=pdf},
  14235         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hill\%20climbing.pdf},
  14236         author = {John R. Douceur and Roger Wattenhofer}
  14237 }
  14238 @conference {Michiardi01core:a,
  14239         title = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in Mobile Ad hoc Networks},
  14240         booktitle = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in Mobile Ad hoc Networks},
  14241         year = {2001},
  14242         pages = {107--121},
  14243         abstract = {Countermeasures for node misbehavior and selfishness are mandatory requirements in MANET. Selfishness that causes lack of node activity cannot be solved by classical security means that aim at verifying the correctness and integrity of an operation. We suggest a generic mechanism based on reputation to enforce cooperation among the nodes of a MANET to prevent selfish behavior. Each network entity keeps track of other entities' collaboration using a technique called reputation. The reputation is calculated based on various types of information on each entity's rate of collaboration. Since there is no incentive for a node to maliciously spread negative information about other nodes, simple denial of service attacks using the collaboration technique itself are prevented. The generic mechanism can be smoothly extended to basic network functions with little impact on existing protocols},
  14244         www_section = {ad-hoc networks},
  14245         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.4100},
  14246         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/michpi-020801.pdf},
  14247         author = {Pietro Michiardi and Refik Molva}
  14248 }
  14249 @conference {Cattaneo:2001:DIT:647054.715628,
  14250         title = {The Design and Implementation of a Transparent Cryptographic File System for UNIX},
  14251         booktitle = {Proceedings of the FREENIX Track: 2001 USENIX Annual Technical Conference},
  14252         year = {2001},
  14253         month = jun,
  14254         pages = {199--212},
  14255         publisher = {USENIX Association},
  14256         organization = {USENIX Association},
  14257         address = {Boston, Massachusetts, USA},
  14258         abstract = {
  14259 Recent advances in hardware and communication technologies have made it possible and cost effective to share a file system among several machines over a local (but possibly also a wide) area network. One of the most successful and widely used such applications is Sun's Network File System (NFS). NFS is very simple in structure but assumes a very strong trust model: the user trusts the remote file system server (which might be running on a machine in a different country) and the network with his/her data. It is easy to see that neither assumption is a very realistic one. The server (or anybody with superuser privileges) might very well read the data on its local filesystem, and it is well known that the Internet or any local area network (e.g., Ethernet) is very easy to tap (see, for example, Berkeley's tcpdump [7, 5] application program). Impersonation of users is another security drawback of NFS. In fact, most of the permission checking over NFS is performed in the kernel of the client. In such a context a pirate can temporarily assign to his own workstation the Internet address of the victim. Without secure RPC [9] no further authentication procedure is required. From here on, the pirate can issue NFS requests presenting himself with any (false) uid and can therefore read and write any private data on the server, even protected data.
  14260 Given the above, a user seeking a certain level of security should take some measures. Possible solutions are to use either user-level cryptography or application-level cryptography. A discussion of the drawbacks of these approaches is found in [4]. A better approach is to push encryption services into the operating system, as done by M. Blaze in the design of his CFS [4].
  14261 In this paper, we propose a new cryptographic file system, which we call TCFS, as a suitable solution to the problem of privacy for distributed file systems (see section 2.1). Our work improves on CFS by providing a deeper integration between the encryption service and the file system, which results in complete transparency of use to the user applications},
  14262         www_section = {cryptographic file system, UNIX},
  14263         isbn = {1-880446-10-3},
  14264         url = {http://dl.acm.org/citation.cfm?id=647054.715628},
  14265         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FREENIX\%2701\%20-\%20A\%20transparent\%20cryptographic\%20file\%20system\%20for\%20UNIX.pdf},
  14266         author = {Cattaneo, Giuseppe and Catuogno, Luigi and Sorbo, Aniello Del and Persiano, Pino}
  14267 }
  14268 @article {2001_1,
  14269         title = {DVD COPY CONTROL ASSOCIATION vs. ANDREW BUNNER},
  14270         journal = {unknown},
  14271         author = {unknown},
  14272         year = {2001}
  14273 }
  14274 @article {Luby01efficienterasure,
  14275         title = {Efficient erasure correcting codes},
  14276         booktitle = {Efficient erasure correcting codes},
  14277         journal = {IEEE Transactions on Information Theory},
  14278         volume = {47},
  14279         year = {2001},
  14280         pages = {569--584},
  14281         abstract = {We introduce a simple erasure recovery algorithm for codes derived from cascades of sparse bipartite graphs and analyze the algorithm by analyzing a corresponding discrete-time random process. As a result, we obtain a simple criterion involving the fractions of nodes of different degrees on both sides of the graph which is necessary and sufficient for the decoding process to finish successfully with high probability. By carefully designing these graphs we can construct for any given rate R and any given real number {\epsilon} a family of linear codes of rate R which can be encoded in time proportional to ln(1/{\epsilon}) times their block length n. Furthermore, a codeword can be recovered with high probability from a portion of its entries of length (1+{\epsilon})Rn or more. The recovery algorithm also runs in time proportional to n ln(1/{\epsilon}). Our algorithms have been implemented and work well in practice; various implementation issues are discussed},
  14282         www_section = {coding theory, recovery algorithm},
  14283         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.107.244.pdf},
  14284         author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A. Spielman}
  14285 }
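% The entry above constructs near-optimal erasure codes from sparse bipartite
% graphs. The toy below shows only the bare concept of an erasure code -- a
% single XOR parity block over k data blocks, recovering from exactly one
% erasure -- and is in no way the paper's construction, which tolerates many
% erasures at (1+epsilon) overhead:

def encode(blocks):
    """Append a single parity block equal to the XOR of all data blocks."""
    parity = bytes(len(blocks[0]))
    for block in blocks:
        parity = bytes(x ^ y for x, y in zip(parity, block))
    return blocks + [parity]

def recover(received):
    """Rebuild at most one missing block (None) by XORing the survivors."""
    missing = [i for i, block in enumerate(received) if block is None]
    assert len(missing) <= 1, "single parity corrects only one erasure"
    if missing:
        survivors = [block for block in received if block is not None]
        acc = bytes(len(survivors[0]))
        for block in survivors:
            acc = bytes(x ^ y for x, y in zip(acc, block))
        received[missing[0]] = acc
    return received[:-1]                      # strip the parity block again

if __name__ == "__main__":
    sent = encode([b"aaaa", b"bbbb", b"cccc"])
    sent[1] = None                            # one block is lost in transit
    print(recover(sent))                      # -> [b'aaaa', b'bbbb', b'cccc']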
  14286 @conference {PShuffle,
  14287         title = {An Efficient Scheme for Proving a Shuffle},
  14288         booktitle = {Proceedings of {CRYPTO} 2001},
  14289         year = {2001},
  14290         publisher = {Springer-Verlag, LNCS 2139},
  14291         organization = {Springer-Verlag, LNCS 2139},
  14292         abstract = {In this paper, we propose a novel and efficient protocol for proving the correctness of a shuffle, without leaking how the shuffle was performed. Using this protocol, we can prove the correctness of a shuffle of n data with roughly 18n exponentiations, whereas the protocol of Sako-Kilian [SK95] required 642n and that of Abe [Ab99] required 22n log n. The length of the proof will be only $2^{11} n$ bits in our protocol, as opposed to $2^{18} n$ bits and $2^{14} n \log n$ bits required by Sako-Kilian and Abe, respectively. The proposed protocol will be a building block of an efficient, universally verifiable mix-net, whose application to voting systems is prominent},
  14293         isbn = {978-3-540-42456-7},
  14294         doi = {10.1007/3-540-44647-8},
  14295         url = {http://portal.acm.org/citation.cfm?id=704279},
  14296         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PShuffle.pdf},
  14297         author = {Jun Furukawa and Kazue Sako},
  14298         editor = {Joe Kilian}
  14299 }
  14300 @conference {cl01a,
  14301         title = {An Efficient System for Non-transferable Anonymous Credentials with Optional Anonymity Revocation},
  14302         booktitle = {Proceedings of the International Conference on the Theory and Application of Cryptographic Techniques (EUROCRYPT '01)},
  14303         year = {2001},
  14304         pages = {93--118},
  14305         publisher = {Springer-Verlag},
  14306         organization = {Springer-Verlag},
  14307         address = {London, UK},
  14308         abstract = {A credential system is a system in which users can obtain credentials from organizations and demonstrate possession of these credentials. Such a system is anonymous when transactions carried out by the same user cannot be linked. An anonymous credential system is of significant practical relevance because it is the best means of providing privacy for users. In this paper we propose a practical anonymous credential system that is based on the strong RSA assumption and the decisional Diffie-Hellman assumption modulo a safe prime product and is considerably superior to existing ones: 1 We give the first practical solution that allows a user to unlinkably demonstrate possession of a credential as many times as necessary without involving the issuing organization. 2 To prevent misuse of anonymity, our scheme is the first to offer optional anonymity revocation for particular transactions. 3 Our scheme offers separability: all organizations can choose their cryptographic keys independently of each other. Moreover, we suggest more effective means of preventing users from sharing their credentials, by introducing all-or-nothing sharing: a user who allows a friend to use one of her credentials once, gives him the ability to use all of her credentials, i.e., taking over her identity. This is implemented by a new primitive, called circular encryption, which is of independent interest, and can be realized from any semantically secure cryptosystem in the random oracle model},
  14309         isbn = {3-540-42070-3},
  14310         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.63.9429},
  14311         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cl01a.pdf},
  14312         author = {Jan Camenisch and Anna Lysyanskaya}
  14313 }
  14314 @conference {747489,
  14315         title = {Extremum Feedback for Very Large Multicast Groups},
  14316         booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on Networked Group Communication},
  14317         year = {2001},
  14318         pages = {56--75},
  14319         publisher = {Springer-Verlag},
  14320         organization = {Springer-Verlag},
  14321         address = {London, UK},
  14322         abstract = {In multicast communication, it is often required that feedback is received from a potentially very large group of responders while at the same time a feedback implosion needs to be prevented. To this end, a number of feedback control mechanisms have been proposed, which rely either on tree-based feedback aggregation or timer-based feedback suppression. Usually, these mechanisms assume that it is not necessary to discriminate between feedback from different receivers. However, for many applications this is not the case and feedback from receivers with certain response values is preferred (e.g., highest loss or largest delay)},
  14323         www_section = {multicast},
  14324         isbn = {3-540-42824-0},
  14325         url = {http://portal.acm.org/citation.cfm?id=648089.747489$\#$},
  14326         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Widmer2001g.pdf},
  14327         author = {J{\"o}rg Widmer and Thomas Fuhrmann}
  14328 }
  14329 @booklet {freedom21-security,
  14330         title = {Freedom Systems 2.1 Security Issues and Analysis},
  14331         year = {2001},
  14332         month = {May},
  14333         publisher = {Zero Knowledge Systems, {Inc.}},
  14334         type = {White Paper},
  14335         abstract = {We describe attacks to which Freedom, or Freedom users, may be vulnerable. These attacks are those that reduce the privacy of a Freedom user, through exploiting cryptographic, design or implementation issues. We include issues which may not be Freedom security issues which arise when the system is not properly used. This disclosure includes all known design or implementation flaws, as well as places where various trade-offs made while creating the system have privacy implications. We also discuss cryptographic points that are needed for a complete understanding of how Freedom works, including ones we don't believe can be used to reduce anyone's privacy},
  14336         www_section = {Freedom, privacy},
  14337         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Freedom_Security2-1.pdf},
  14338         author = {Adam Back and Ian Goldberg and Adam Shostack}
  14339 }
  14340 @conference {2001_2,
  14341         title = {A Generalisation, a Simplification and Some Applications of Paillier's Probabilistic Public-Key System},
  14342         booktitle = {Proceedings of the 4th International Workshop on Practice and Theory in Public Key Cryptography: Public Key Cryptography},
  14343         year = {2001},
  14344         publisher = {Springer-Verlag},
  14345         organization = {Springer-Verlag},
  14346         address = {London, UK, UK},
  14347         abstract = {We propose a generalisation of Paillier's probabilistic public key system, in which the expansion factor is reduced and which allows adjusting the block length of the scheme even after the public key has been fixed, without losing the homomorphic property. We show that the generalisation is as secure as Paillier's original system.
  14348 
  14349 We construct a threshold variant of the generalised scheme as well as zero-knowledge protocols to show that a given ciphertext encrypts one of a set of given plaintexts, and protocols to verify multiplicative relations on plaintexts.
  14350 
  14351 We then show how these building blocks can be used for applying the scheme to efficient electronic voting. This dramatically reduces the work needed to compute the final result of an election, compared to the previously best known schemes. We show how the basic scheme for a yes/no vote can be easily adapted to casting a vote for up to t out of L candidates. The same basic building blocks can also be adapted to provide receipt-free elections, under appropriate physical assumptions. The scheme for 1 out of L elections can be optimised such that for a certain range of parameter values, a ballot has size only O(log L) bits},
  14352         isbn = {3-540-41658-7},
  14353         url = {http://dl.acm.org/citation.cfm?id=648118.746742},
  14354         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Generalisation2001Damgard.pdf},
  14355         author = {Damg{\aa}rd, Ivan and Jurik, Mats}
  14356 }
  14357 @article {9999,
  14358         title = {The Gnutella Protocol Specification v0.4},
  14359         author = {TODO},
  14360         year = {2001},
  14361         journal = {unknown},
  14362         abstract = {A brief description of the Gnutella protocol},
  14363         url = {http://www9.limewire.com/developer/gnutella_protocol_0.4.pdf}
  14364 }
  14365 @conference {Cabrera01herald:achieving,
  14366         title = {Herald: Achieving a Global Event Notification Service},
  14367         booktitle = {In HotOS VIII},
  14368         year = {2001},
  14369         publisher = {IEEE Computer Society},
  14370         organization = {IEEE Computer Society},
  14371         abstract = {This paper presents the design philosophy and initial design decisions of Herald: a highly scalable global event notification system that is being designed and built at Microsoft Research. Herald is a distributed system designed to transparently scale in all respects, including numbers of subscribers and publishers, numbers of event subscription points, and event delivery rates. Event delivery can occur within a single machine, within a local network or Intranet, and throughout the Internet},
  14372         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.672\&rep=rep1\&type=pdf},
  14373         author = {Luis Felipe Cabrera and Michael B. Jones and Marvin Theimer}
  14374 }
  14375 @article {Luby01improvedlow-density,
  14376         title = {Improved low-density parity-check codes using irregular graphs},
  14377         journal = {IEEE Trans. Inform. Theory},
  14378         volume = {47},
  14379         year = {2001},
  14380         pages = {585--598},
  14381         abstract = {We construct new families of error-correcting codes based on Gallager's low-density parity-check codes. We improve on Gallager's results by introducing irregular parity-check matrices and a new rigorous analysis of hard-decision decoding of these codes. We also provide efficient methods for finding good irregular structures for such decoding algorithms. Our rigorous analysis based on martingales, our methodology for constructing good irregular codes, and the demonstration that irregular structure improves performance constitute key points of our contribution. We also consider irregular codes under belief propagation. We report the results of experiments testing the efficacy of irregular codes on both binary-symmetric and Gaussian channels. For example, using belief propagation, for rate 1/4 codes on 16 000 bits over a binary-symmetric channel, previous low-density parity-check codes can correct up to approximately 16\% errors, while our codes correct over 17\%. In some cases our results come very close to reported results for turbo codes, suggesting that variations of irregular low-density parity-check codes may be able to match or beat turbo code performance. Index Terms{\textemdash}Belief propagation, concentration theorem, Gallager codes, irregular codes, low-density parity-check codes},
  14382         www_section = {coding theory, low-density parity-check},
  14383         issn = {0018-9448},
  14384         doi = {10.1109/18.910576},
  14385         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.137.6057},
  14386         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/errorsIT.pdf},
  14387         author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A. Spielman}
  14388 }
  14389 @conference {Golle01incentivesfor,
  14390         title = {Incentives for Sharing in Peer-to-Peer Networks},
  14391         booktitle = {EC'01: Proceedings of the Second International Workshop on Electronic Commerce},
  14392         year = {2001},
  14393         pages = {75--87},
  14394         publisher = {Springer-Verlag},
  14395         organization = {Springer-Verlag},
  14396         address = {London, UK},
  14397         abstract = {We consider the free-rider problem in peer-to-peer file sharing networks such as Napster: that individual users are provided with no incentive for adding value to the network. We examine the design implications of the assumption that users will selfishly act to maximize their own rewards, by constructing a formal game theoretic model of the system and analyzing equilibria of user strategies under several novel payment mechanisms. We support and extend this work with results from experiments with a multi-agent reinforcement learning model},
  14398         www_section = {free-riding, incentives},
  14399         isbn = {3-540-42878-X},
  14400         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.23.9004.pdf},
  14401         author = {Philippe Golle and Kevin Leyton-Brown and Ilya Mironov and Mark Lillibridge}
  14402 }
  14403 @article {beimel01informationtheoretic,
  14404         title = {Information-Theoretic Private Information Retrieval: A Unified Construction},
  14405         journal = {Lecture Notes in Computer Science},
  14406         volume = {2076},
  14407         year = {2001},
  14408         pages = {89--98},
  14409         abstract = {A Private Information Retrieval (PIR) protocol enables a user to retrieve a data item from a database while hiding the identity of the item being retrieved. In a t-private, k-server PIR protocol the database is replicated among k servers, and the user's privacy is protected from any collusion of up to t servers. The main cost-measure of such protocols is the communication complexity of retrieving a single bit of data.
  14410 This work addresses the information-theoretic setting for PIR, in which the user's privacy should be unconditionally protected from collusions of servers. We present a unified general construction, whose abstract components can be instantiated to yield both old and new families of PIR protocols. A main ingredient in the new protocols is a generalization of a solution by Babai, Kimmel, and Lokam to a communication complexity problem in the so-called simultaneous messages model.
  14411 Our construction strictly improves upon previous constructions and resolves some previous anomalies. In particular, we obtain: (1) t-private k-server PIR protocols with $O(n^{1/\lfloor (2k-1)/t \rfloor})$ communication bits, where n is the database size. For t > 1, this is a substantial asymptotic improvement over the previous state of the art; (2) a constant-factor improvement in the communication complexity of 1-private PIR, providing the first improvement to the 2-server case since PIR protocols were introduced; (3) efficient PIR protocols with logarithmic query length. The latter protocols have applications to the construction of efficient families of locally decodable codes over large alphabets and to PIR protocols with reduced work by the servers},
  14412         www_section = {communication complexity, privacy, private information retrieval},
  14413         isbn = {978-3-540-42287-7},
  14414         issn = {0302-9743},
  14415         doi = {10.1007/3-540-48224-5},
  14416         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.2796},
  14417         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/beimel01informationtheoretic.pdf},
  14418         author = {Amos Beimel and Yuval Ishai}
  14419 }
  14420 @conference {Department01instrumentingthe,
  14421         title = {Instrumenting The World With Wireless Sensor Networks},
  14422         booktitle = {In International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2001)},
  14423         year = {2001},
  14424         pages = {2033--2036},
  14425         abstract = {Pervasive micro-sensing and actuation may revolutionize the way in which we understand and manage complex physical systems: from airplane wings to complex ecosystems. The capabilities for detailed physical monitoring and manipulation offer enormous opportunities for almost every scientific discipline, and it will alter the feasible granularity of engineering},
  14426         www_section = {wireless sensor network},
  14427         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.3027},
  14428         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.3027.pdf},
  14429         author = {Deborah Estrin and Gregory J. Pottie and L. Girod and Mani Srivastava}
  14430 }
  14431 @conference {2001_3,
  14432         title = {Investigating the energy consumption of a wireless network interface in an ad hoc networking environment},
  14433         booktitle = {Proceedings of IEEE INFOCOM 2001, the Twentieth Annual Joint Conference of the IEEE Computer and Communications Societies},
  14434         year = {2001},
  14435         month = apr,
  14436         address = {Anchorage, AK, USA},
  14437         abstract = {Energy-aware design and evaluation of network protocols requires knowledge of the energy consumption behavior of actual wireless interfaces. But little practical information is available about the energy consumption behavior of well-known wireless network interfaces and device specifications do not provide information in a form that is helpful to protocol developers. This paper describes a series of experiments which obtained detailed measurements of the energy consumption of an IEEE 802.11 wireless network interface operating in an ad hoc networking environment. The data is presented as a collection of linear equations for calculating the energy consumed in sending, receiving and discarding broadcast and point-to-point data packets of various sizes. Some implications for protocol design and evaluation in ad hoc networks are discussed},
  14438         www_section = {ad-hoc networks, energy consumption, IEEE 802.11},
  14439         isbn = {0-7803-7016-3},
  14440         doi = {10.1109/INFCOM.2001.916651},
  14441         url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=916651},
  14442         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infocom01investigating.pdf},
  14443         author = {Feeney, Laura Marie and Nilsson, Martin}
  14444 }
  14445 @conference {502052,
  14446         title = {A low-bandwidth network file system},
  14447         booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems principles},
  14448         year = {2001},
  14449         pages = {174--187},
  14450         publisher = {ACM},
  14451         organization = {ACM},
  14452         address = {New York, NY, USA},
  14453         abstract = {Users rarely consider running network file systems over slow or wide-area networks, as the performance would be unacceptable and the bandwidth consumption too high. Nonetheless, efficient remote file access would often be desirable over such networks---particularly when high latency makes remote login sessions unresponsive. Rather than run interactive programs such as editors remotely, users could run the programs locally and manipulate remote files through the file system. To do so, however, would require a network file system that consumes less bandwidth than most current file systems. This paper presents LBFS, a network file system designed for low-bandwidth networks. LBFS exploits similarities between files or versions of the same file to save bandwidth. It avoids sending data over the network when the same data can already be found in the server's file system or the client's cache. Using this technique in conjunction with conventional compression and caching, LBFS consumes over an order of magnitude less bandwidth than traditional network file systems on common workloads},
  14454         www_section = {file systems, workload characterization},
  14455         isbn = {1-58113-389-8},
  14456         doi = {10.1145/502034.502052},
  14457         url = {http://portal.acm.org/citation.cfm?id=502052$\#$},
  14458         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lbfs.pdf},
  14459         author = {Muthitacharoen, Athicha and Chen, Benjie and David Mazi{\`e}res}
  14460 }
  14461 @book {2001_4,
  14462         title = {Multiparty Computation from Threshold Homomorphic Encryption},
  14463         booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT 2001},
  14464         series = {Lecture Notes in Computer Science},
  14465         volume = {2045},
  14466         year = {2001},
  14467         pages = {280--300},
  14468         publisher = {Springer Berlin Heidelberg},
  14469         organization = {Springer Berlin Heidelberg},
  14470         abstract = {We introduce a new approach to multiparty computation (MPC) basing it on homomorphic threshold crypto-systems. We show that given keys for any sufficiently efficient system of this type, general MPC protocols for n parties can be devised which are secure against an active adversary that corrupts any minority of the parties. The total number of bits broadcast is O(nk|C|), where k is the security parameter and |C| is the size of a (Boolean) circuit computing the function to be securely evaluated. An earlier proposal by Franklin and Haber with the same complexity was only secure for passive adversaries, while all earlier protocols with active security had complexity at least quadratic in n. We give two examples of threshold cryptosystems that can support our construction and lead to the claimed complexities},
  14471         isbn = {978-3-540-42070-5},
  14472         doi = {10.1007/3-540-44987-6_18},
  14473         url = {http://dx.doi.org/10.1007/3-540-44987-6_18},
  14474         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2001Cramer.pdf},
  14475         author = {Cramer, Ronald and Damg{\aa}rd, Ivan and Nielsen, Jesper B.},
  14476         editor = {Pfitzmann, Birgit}
  14477 }
  14478 @conference {jakobsson-optimally,
  14479         title = {An Optimally Robust Hybrid Mix Network (Extended Abstract)},
  14480         booktitle = {Proceedings of Principles of Distributed Computing--{PODC} '01},
  14481         year = {2001},
  14482         publisher = {ACM Press},
  14483         organization = {ACM Press},
  14484         abstract = {We present a mix network that achieves efficient integration of public-key and symmetric-key operations. This hybrid mix network is capable of natural processing of arbitrarily long input elements, and is fast in both practical and asymptotic senses. While the overhead in the size of input elements is linear in the number of mix servers, it is quite small in practice. In contrast to previous hybrid constructions, ours has optimal robustness, that is, robustness against any minority coalition of malicious servers},
  14485         www_section = {mix, public key cryptography, robustness},
  14486         isbn = {1-58113-383-9},
  14487         url = {http://portal.acm.org/citation.cfm?id=383962.384046},
  14488         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.24.8205.pdf},
  14489         author = {Jakobsson, Markus and Ari Juels}
  14490 }
  14491 @conference {Druschel01past:a,
  14492         title = {PAST: A large-scale, persistent peer-to-peer storage utility},
  14493         booktitle = {In HotOS VIII},
  14494         year = {2001},
  14495         pages = {75--80},
  14496         abstract = {This paper sketches the design of PAST, a large-scale, Internet-based, global storage utility that provides scalability, high availability, persistence and security. PAST is a peer-to-peer Internet application and is entirely self-organizing. PAST nodes serve as access points for clients, participate in the routing of client requests, and contribute storage to the system. Nodes are not trusted; they may join the system at any time and may silently leave the system without warning. Yet, the system is able to provide strong assurances, efficient storage access, load balancing and scalability},
  14497         www_section = {peer-to-peer storage},
  14498         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.1674},
  14499         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.1.1674.pdf},
  14500         author = {Peter Druschel and Antony Rowstron}
  14501 }
  14502 @conference {697650,
  14503         title = {Pastry: Scalable, Decentralized Object Location, and Routing for Large-Scale Peer-to-Peer Systems},
  14504         booktitle = {Middleware '01: Proceedings of the IFIP/ACM International Conference on Distributed Systems Platforms Heidelberg},
  14505         year = {2001},
  14506         pages = {329--350},
  14507         publisher = {Springer-Verlag},
  14508         organization = {Springer-Verlag},
  14509         address = {London, UK},
  14510         abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications.Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data
  14511 storage, data sharing, group communication and naming. Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
  14512 Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry's scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties},
  14513         www_section = {overlay networks, P2P},
  14514         isbn = {3-540-42800-3},
  14515         url = {http://portal.acm.org/citation.cfm?id=697650$\#$},
  14516         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pastry.pdf},
  14517         author = {Antony Rowstron and Peter Druschel}
  14518 }
  14519 @conference {Rowstron01pastry:scalable,
  14520         title = {Pastry: Scalable, decentralized object location and routing for large-scale peer-to-peer systems},
  14521         booktitle = {Middleware'01--Proceedings of the IFIP/ACM International Conference on Distributed Systems Platforms},
  14522         series = {Lecture Notes in Computer Science},
  14523         volume = {2218},
  14524         year = {2001},
  14525         month = nov,
  14526         pages = {329--350},
  14527         publisher = {Springer-Verlag},
  14528         organization = {Springer-Verlag},
  14529         address = {Heidelberg, Germany},
  14530         abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data storage, data sharing, group communication and naming.
  14531 Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops.
  14532 Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry's scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties
  14533 Work done in part while visiting Microsoft Research, Cambridge, UK},
  14534         www_section = {distributed hash table, Pastry},
  14535         isbn = {3-540-42800-3},
  14536         doi = {10.1007/3-540-45518-3_18},
  14537         url = {http://www.cs.rice.edu/~druschel/publications/Pastry.pdf},
  14538         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Pastry.pdf},
  14539         author = {Antony Rowstron and Peter Druschel}
  14540 }
  14541 @book {558412,
  14542         title = {Peer-to-Peer: Harnessing the Power of Disruptive Technologies},
  14543         author = {Oram, Andy},
  14544         year = {2001},
  14545         publisher = {O'Reilly \& Associates, Inc},
  14546         organization = {O'Reilly \& Associates, Inc},
  14547         address = {Sebastopol, CA, USA},
  14548         abstract = {Upstart software projects Napster, Gnutella, and Freenet have dominated newspaper headlines, challenging traditional approaches to content distribution with their revolutionary use of peer-to-peer file-sharing technologies. Reporters try to sort out the ramifications of seemingly ungoverned peer-to-peer networks. Lawyers, business leaders, and social commentators debate the virtues and evils of these bold new distributed systems. But what's really behind such disruptive technologies -- the breakthrough innovations that have rocked the music and media worlds? And what lies ahead?
  14549 In this book, key peer-to-peer pioneers take us beyond the headlines and hype and show how the technology is changing the way we communicate and exchange information. Those working to advance peer-to-peer as a technology, a business opportunity, and an investment offer their insights into how the technology has evolved and where it's going. They explore the problems they've faced, the solutions they've discovered, the lessons they've learned, and their goals for the future of computer networking.
  14550 
  14551 Until now, Internet communities have been limited by the flat interactive qualities of email and network newsgroups, where people can exchange recommendations and ideas but have great difficulty commenting on one another's postings, structuring information, performing searches, and creating summaries. Peer-to-peer challenges the traditional authority of the client/server model, allowing shared information to reside instead with producers and users. Peer-to-peer networks empower users to collaborate on producing and consuming information, adding to it, commenting on it, and building communities around it.
  14552 
  14553 This compilation represents the collected wisdom of today's peer-to-peer luminaries. It includes contributions from Gnutella's Gene Kan, Freenet's Brandon Wiley, Jabber's Jeremie Miller, and many others -- plus serious discussions of topics ranging from accountability and trust to security and performance. Fraught with questions and promise, peer-to-peer is sure to remain on the computer industry's center stage for years to come},
  14554         isbn = {059600110X},
  14555         url = {http://portal.acm.org/citation.cfm?id=558412$\#$},
  14556         editor = {Andy Oram}
  14557 }
  14558 @book {2001_5,
  14559         title = {Peer-To-Peer: Harnessing the Power of Disruptive Technologies -- Chapter 12: Free Haven},
  14560         year = {2001},
  14561         publisher = {O'Reilly Media},
  14562         organization = {O'Reilly Media},
  14563         abstract = {Description of the problems that arise when one tries to combine anonymity and accountability. Note that the Free Haven design described here charges for storing data in the network (downloads are free), whereas in GNUnet adding data is free and only downloads are counted as utilization},
  14564         author = {Roger Dingledine and Michael J. Freedman and David Molnar},
  14565         editor = {Andy Oram}
  14566 }
  14567 @booklet {Chen01poblano:a,
  14568         title = {Poblano: A distributed trust model for peer-to-peer networks},
  14569         year = {2001},
  14570         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.7489\&rep=rep1\&type=pdf},
  14571         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.7489.pdf},
  14572         author = {Rita Chen and William Yeager}
  14573 }
  14574 @booklet {Batten01pstore:a,
  14575         title = {pStore: A Secure Peer-to-Peer Backup System},
  14576         year = {2001},
  14577         abstract = {In an effort to combine research in peer-to-peer systems with techniques for incremental backup systems, we propose pStore: a secure distributed backup system based on an adaptive peer-to-peer network. pStore exploits unused personal hard drive space attached to the Internet to provide the distributed redundancy needed for reliable and effective data backup. Experiments on a 30 node network show that 95\% of the files in a 13 MB dataset can be retrieved even when 7 of the nodes have failed. On top of this reliability, pStore includes support for file encryption, versioning, and secure sharing. Its custom versioning system permits arbitrary version retrieval similar to CVS. pStore provides this functionality at less than 10\% of the network bandwidth and requires 85\% less storage capacity than simpler local tape backup schemes for a representative workload},
  14578         www_section = {P2P, robustness},
  14579         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.3444},
  14580         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3444.pdf},
  14581         author = {Christopher Batten and Kenneth Barr and Arvind Saraf and Stanley Trepetin}
  14582 }
  14583 @booklet {Hubaux01thequest,
  14584         title = {The Quest for Security in Mobile Ad Hoc Networks},
  14585         year = {2001},
  14586         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.130.6088\&rep=rep1\&type=pdf},
  14587         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.130.6088.pdf},
  14588         author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun}
  14589 }
  14590 @conference {501437,
  14591         title = {The quest for security in mobile ad hoc networks},
  14592         booktitle = {MobiHoc '01: Proceedings of the 2nd ACM international symposium on Mobile ad hoc networking \& computing},
  14593         year = {2001},
  14594         pages = {146--155},
  14595         publisher = {ACM},
  14596         organization = {ACM},
  14597         address = {New York, NY, USA},
  14598         abstract = {So far, research on mobile ad hoc networks has been focused primarily on routing issues. Security, on the other hand, has been given a lower priority. This paper provides an overview of security problems for mobile ad hoc networks, distinguishing the threats on basic mechanisms and on security mechanisms. It then describes our solution to protect the security mechanisms. The original features of this solution include that (i) it is fully decentralized and (ii) all nodes are assigned equivalent roles},
  14599         www_section = {ad-hoc networks, routing},
  14600         isbn = {1-58113-428-2},
  14601         doi = {10.1145/501436.501437},
  14602         url = {http://portal.acm.org/citation.cfm?id=501437$\#$},
  14603         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Quest01.pdf},
  14604         author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun}
  14605 }
  14606 @conference {patterns-failure,
  14607         title = {Real World Patterns of Failure in Anonymity Systems},
  14608         booktitle = {Proceedings of Information Hiding Workshop (IH 2001)},
  14609         year = {2001},
  14610         month = {April},
  14611         pages = {230--244},
  14612         publisher = {Springer-Verlag, LNCS 2137},
  14613         organization = {Springer-Verlag, LNCS 2137},
  14614         abstract = {We present attacks on the anonymity and pseudonymity provided by a "lonely hearts" dating service and by the HushMail encrypted email system. We move on to discuss some generic attacks upon anonymous systems based on the engineering reality of these systems rather than the theoretical foundations on which they are based. However, for less sophisticated users it is social engineering attacks, owing nothing to computer science, that pose the biggest day-to-day danger. This practical experience then permits a start to be made on developing a security policy model for pseudonymous communications},
  14615         www_section = {pseudonym, security policy},
  14616         isbn = {3-540-42733-3},
  14617         url = {http://portal.acm.org/citation.cfm?id=731864},
  14618         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Patterns_of_Failure.pdf},
  14619         author = {Richard Clayton and George Danezis and Markus G. Kuhn},
  14620         editor = {Ira S. Moskowitz}
  14621 }
  14622 @booklet {Dingledine01areputation,
  14623         title = {A Reputation System to Increase MIX-net Reliability},
  14624         year = {2001},
  14625         abstract = {We describe a design for a reputation system that increases the reliability and thus efficiency of remailer services. Our reputation system uses a MIX-net in which MIXes give receipts for intermediate messages. Together with a set of witnesses, these receipts allow senders to verify the correctness of each MIX and prove misbehavior to the witnesses},
  14626         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.7912\&rep=rep1\&type=pdf},
  14627         author = {Roger Dingledine and Michael J. Freedman and David Hopwood and David Molnar}
  14628 }
  14629 @conference {mix-acc,
  14630         title = {A Reputation System to Increase MIX-net Reliability},
  14631         booktitle = {Proceedings of Information Hiding Workshop (IH 2001)},
  14632         year = {2001},
  14633         month = {April},
  14634         pages = {126--141},
  14635         publisher = {Springer-Verlag, LNCS 2137},
  14636         organization = {Springer-Verlag, LNCS 2137},
  14637         abstract = {We describe a design for a reputation system that increases the reliability and thus efficiency of remailer services. Our reputation system uses a MIX-net in which MIXes give receipts for intermediate messages. Together with a set of witnesses, these receipts allow senders to verify the correctness of each MIX and prove misbehavior to the witnesses},
  14638         www_section = {remailer},
  14639         isbn = {978-3-540-42733-9},
  14640         doi = {10.1007/3-540-45496-9},
  14641         url = {http://www.springerlink.com/content/ej8qv86wdkeukjc5/},
  14642         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mix-acc.pdf},
  14643         author = {Roger Dingledine and Michael J. Freedman and David Hopwood and David Molnar},
  14644         editor = {Ira S. Moskowitz}
  14645 }
  14646 @article {502048,
  14647         title = {Resilient overlay networks},
  14648         journal = {SIGOPS Oper. Syst. Rev},
  14649         volume = {35},
  14650         number = {5},
  14651         year = {2001},
  14652         pages = {131--145},
  14653         publisher = {ACM},
  14654         address = {New York, NY, USA},
  14655         abstract = {A Resilient Overlay Network (RON) is an architecture that allows distributed Internet applications to detect and recover from path outages and periods of degraded performance within several seconds, improving over today's wide-area routing protocols that take at least several minutes to recover. A RON is an application-layer overlay on top of the existing Internet routing substrate. The RON nodes monitor the functioning and quality of the Internet paths among themselves, and use this information to decide whether to route packets directly over the Internet or by way of other RON nodes, optimizing application-specific routing metrics. Results from two sets of measurements of a working RON deployed at sites scattered across the Internet demonstrate the benefits of our architecture. For instance, over a 64-hour sampling period in March 2001 across a twelve-node RON, there were 32 significant outages, each lasting over thirty minutes, over the 132 measured paths. RON's routing mechanism was able to detect, recover, and route around all of them, in less than twenty seconds on average, showing that its methods for fault detection and recovery work well at discovering alternate paths in the Internet. Furthermore, RON was able to improve the loss rate, latency, or throughput perceived by data transfers; for example, about 5\% of the transfers doubled their TCP throughput and 5\% of our transfers saw their loss probability reduced by 0.05. We found that forwarding packets via at most one intermediate RON node is sufficient to overcome faults and improve performance in most cases. These improvements, particularly in the area of fault detection and recovery, demonstrate the benefits of moving some of the control over routing into the hands of end-systems},
  14656         www_section = {resilient overlay network},
  14657         issn = {0163-5980},
  14658         doi = {10.1145/502059.502048},
  14659         url = {http://portal.acm.org/citation.cfm?id=502059.502048$\#$},
  14660         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ron-sosp2001.pdf},
  14661         author = {Andersen, David and Hari Balakrishnan and Frans M. Kaashoek and Robert Morris}
  14662 }
  14663 @conference {Scarlata01responderanonymity,
  14664         title = {Responder Anonymity and Anonymous Peer-to-Peer File Sharing},
  14665         booktitle = {Proceedings of the Ninth International Conference on Network Protocols (ICNP 2001)},
  14666         year = {2001},
  14667         month = nov,
  14668         abstract = {Data transfer over TCP/IP provides no privacy for network users. Previous research in anonymity has focused on the provision of initiator anonymity. We explore methods of adapting existing initiator-anonymous protocols to provide responder anonymity and mutual anonymity. We present Anonymous Peer-to-peer File Sharing (APFS) protocols, which provide mutual anonymity for peer-to-peer file sharing. APFS addresses the problem of long-lived Internet services that may outlive the degradation present in current anonymous protocols. One variant of APFS makes use of unicast communication, but requires a central coordinator to bootstrap the protocol. A second variant takes advantage of multicast routing to remove the need for any central coordination point. We compare the TCP performance of the APFS protocol to existing overt file sharing systems such as Napster. In providing anonymity, APFS can double transfer times and requires that additional traffic be carried by peers, but this overhead is constant with the size of the session},
  14669         www_section = {anonymity, APFS, multicast},
  14670         isbn = {0-7695-1429-4},
  14671         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.7821\&rep=rep1\&type=url\&i=0},
  14672         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/APFS.pdf},
  14673         author = {Vincent Scarlata and Brian Neil Levine and Clay Shields}
  14674 }
  14675 @conference {383072,
  14676         title = {A scalable content-addressable network},
  14677         booktitle = {SIGCOMM '01: Proceedings of the 2001 conference on Applications, technologies, architectures, and protocols for computer communications},
  14678         year = {2001},
  14679         pages = {161--172},
  14680         publisher = {ACM},
  14681         organization = {ACM},
  14682         address = {New York, NY, USA},
  14683         abstract = {Hash tables--which map "keys" onto "values"--are an essential building block in modern software systems. We believe a similar functionality would be equally valuable to large distributed systems. In this paper, we introduce the concept of a Content-Addressable Network (CAN) as a distributed infrastructure that provides hash table-like functionality on Internet-like scales. The CAN is scalable, fault-tolerant and completely self-organizing, and we demonstrate its scalability, robustness and low-latency properties through simulation},
  14684         www_section = {CAN, fault-tolerance, robustness},
  14685         isbn = {1-58113-411-8},
  14686         doi = {10.1145/383059.383072},
  14687         url = {http://portal.acm.org/citation.cfm?id=383072$\#$},
  14688         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.8434.pdf},
  14689         author = {Sylvia Paul Ratnasamy and Paul Francis and Handley, Mark and Richard Karp and Scott Shenker}
  14690 }
  14691 @article { 10.1109/P2P.2001.990421,
  14692         title = {Search in JXTA and Other Distributed Networks},
  14693         journal = {Peer-to-Peer Computing, IEEE International Conference on},
  14694         year = {2001},
  14695         pages = {0--0030},
  14696         publisher = {IEEE Computer Society},
  14697         address = {Los Alamitos, CA, USA},
  14698         isbn = {0-7695-1503-7},
  14699         doi = {10.1109/P2P.2001.990421},
  14700         url = {https://bibliography.gnunet.org},
  14701         author = {Sherif Botros and Steve Waterhouse}
  14702 }
  14703 @article {cheap-pseudonyms,
  14704         title = {The social cost of cheap pseudonyms},
  14705         journal = {Journal of Economics and Management Strategy},
  14706         volume = {10},
  14707         number = {2},
  14708         year = {2001},
  14709         pages = {173--199},
  14710         abstract = {We consider the problems of societal norms for cooperation and reputation when it is possible to obtain cheap pseudonyms, something that is becoming quite common in a wide variety of interactions on the Internet. This introduces opportunities to misbehave without paying reputational consequences. A large degree of cooperation can still emerge, through a convention in which newcomers "pay their dues" by accepting poor treatment from players who have established positive reputations. One might hope for an open society where newcomers are treated well, but there is an inherent social cost in making the spread of reputations optional. We prove that no equilibrium can sustain significantly more cooperation than the dues-paying equilibrium in a repeated random matching game with a large number of players in which players have finite lives and the ability to change their identities, and there is a small but nonvanishing probability of mistakes. Although one could remove the inefficiency of mistreating newcomers by disallowing anonymity, this is not practical or desirable in a wide variety of transactions. We discuss the use of entry fees, which permits newcomers to be trusted but excludes some players with low payoffs, thus introducing a different inefficiency. We also discuss the use of free but unreplaceable pseudonyms, and describe a mechanism that implements them using standard encryption techniques, which could be practically implemented in electronic transactions},
  14711         www_section = {pseudonym},
  14712         doi = {10.1111/j.1430-9134.2001.00173.x},
  14713         url = {http://www3.interscience.wiley.com/journal/119023370/abstract?CRETRY=1\&SRETRY=0},
  14714         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/081199.pdf},
  14715         author = {Eric Friedman and Paul Resnick}
  14716 }
  14717 @conference {strong-eternity,
  14718         title = {The Strong Eternity Service},
  14719         booktitle = {Proceedings of Information Hiding Workshop (IH 2001)},
  14720         year = {2001},
  14721         month = {April},
  14722         publisher = {Springer-Verlag, LNCS 2137},
  14723         organization = {Springer-Verlag, LNCS 2137},
  14724         abstract = {The Strong Eternity Service provides safe and very reliable storage for data of high importance. We show how to establish persistent pseudonyms in a totally anonymous environment and how to create a unique, fully distributed name-space allowing both computer-efficient and human-acceptable access. We also present a way to retrieve information from such data storage. We adapt the notion of the mix-network so that it can provide symmetric anonymity to both the client and the server. Finally, we propose a system of after-the-act payments that can support operation of the Service without compromising anonymity},
  14725         www_section = {anonymity service, distributed name-space, pseudonym},
  14726         isbn = {978-3-540-42733-9},
  14727         doi = {10.1007/3-540-45496-9},
  14728         url = {http://portal.acm.org/citation.cfm?id=731726},
  14729         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/strong-eternity.pdf},
  14730         author = {Tonda Benes},
  14731         editor = {Ira S. Moskowitz}
  14732 }
  14733 @conference {Waldman01tangler:a,
  14734         title = {Tangler: A Censorship-Resistant Publishing System Based On Document Entanglements},
  14735         booktitle = {In Proceedings of the 8th ACM Conference on Computer and Communications Security},
  14736         year = {2001},
  14737         pages = {126--135},
  14738         abstract = {The basic idea is to protect documents by making it impossible to remove one document from the system without losing others. The underlying assumption that the adversary cares about collateral damage of this kind is a bit far-fetched. Also, the entanglement doubles the amount of data that needs to be moved to retrieve a document},
  14739         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3781\&rep=rep1\&type=pdf},
  14740         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tangler.pdf},
  14741         author = {Marc Waldman and David Mazi{\`e}res}
  14742 }
  14743 @conference {waldman01tangler,
  14744         title = {Tangler: a censorship-resistant publishing system based on document entanglements},
  14745         booktitle = {Proceedings of the 8th ACM Conference on Computer and Communications Security (CCS 2001)},
  14746         year = {2001},
  14747         month = {November},
  14748         pages = {126--135},
  14749         abstract = {We describe the design of a censorship-resistant system that employs a unique document storage mechanism. Newly published documents are dependent on the blocks of previously published documents. We call this dependency an entanglement. Entanglement makes replication of previously published content an intrinsic part of the publication process. Groups of files, called collections, can be published together and named in a host-independent manner. Individual documents within a collection can be securely updated in such a way that future readers of the collection see and tamper-check the updates. The system employs a self-policing network of servers designed to eject non-compliant servers and prevent them from doing more harm than good},
  14750         www_section = {censorship resistance, host-independent, self-policing network},
  14751         isbn = {1-58113-385-5},
  14752         doi = {10.1145/501983.502002},
  14753         url = {http://portal.acm.org/citation.cfm?id=501983.502002\&coll=GUIDE\&dl=GUIDE\&type=series\&idx=SERIES320\&part=series\&WantType=Proceedings\&title=CCS\&CFID=75729899\&CFTOKEN=36385677},
  14754         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.24.3781.pdf},
  14755         author = {Marc Waldman and David Mazi{\`e}res}
  14756 }
  14757 @booklet {Zhao01tapestry:an,
  14758         title = {Tapestry: An Infrastructure for Fault-tolerant Wide-area Location and Routing},
  14759         year = {2001},
  14760         abstract = {In today's chaotic network, data and services are mobile and replicated widely for availability, durability, and locality. Components within this infrastructure interact in rich and complex ways, greatly stressing traditional approaches to name service and routing. This paper explores an alternative to traditional approaches called Tapestry. Tapestry is an overlay location and routing infrastructure that provides location-independent routing of messages directly to the closest copy of an object or service using only point-to-point links and without centralized resources. The routing and directory information within this infrastructure is purely soft state and easily repaired. Tapestry is self-administering, fault-tolerant, and resilient under load. This paper presents the architecture and algorithms of Tapestry and explores their advantages through a number of experiments},
  14761         url = {http://portal.acm.org/citation.cfm?id=894116$\#$},
  14762         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-01-1141.pdf},
  14763         author = {Ben Y. Zhao and John Kubiatowicz and Anthony D. Joseph}
  14764 }
  14765 @book {2001_6,
  14766         title = {The Theory of Incentives: The Principal-Agent Model},
  14767         year = {2001},
  14768         pages = {0--360},
  14769         publisher = {Princeton University Press},
  14770         organization = {Princeton University Press},
  14771         address = {Princeton, New Jersey, USA},
  14772         abstract = {Economics has much to do with incentives--not least, incentives to work hard, to produce quality products, to study, to invest, and to save. Although Adam Smith amply confirmed this more than two hundred years ago in his analysis of sharecropping contracts, only in recent decades has a theory begun to emerge to place the topic at the heart of economic thinking. In this book, Jean-Jacques Laffont and David Martimort present the most thorough yet accessible introduction to incentives theory to date. Central to this theory is a simple question as pivotal to modern-day management as it is to economics research: What makes people act in a particular way in an economic or business situation? In seeking an answer, the authors provide the methodological tools to design institutions that can ensure good incentives for economic agents.
  14773 
  14774 This book focuses on the principal-agent model, the "simple" situation where a principal, or company, delegates a task to a single agent through a contract--the essence of management and contract theory. How does the owner or manager of a firm align the objectives of its various members to maximize profits? Following a brief historical overview showing how the problem of incentives has come to the fore in the past two centuries, the authors devote the bulk of their work to exploring principal-agent models and various extensions thereof in light of three types of information problems: adverse selection, moral hazard, and non-verifiability. Offering an unprecedented look at a subject vital to industrial organization, labor economics, and behavioral economics, this book is set to become the definitive resource for students, researchers, and others who might find themselves pondering what contracts, and the incentives they embody, are really all about},
  14775         www_section = {economics, principal-agent model},
  14776         isbn = {9780691091846},
  14777         url = {http://press.princeton.edu/chapters/i7311.html},
  14778         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Laffont\%20\%26\%20Martimort\%20-\%20The\%20Theory\%20of\%20Incentives.pdf},
  14779         author = {Jean-Jacques Laffont and David Martimort}
  14780 }
  14781 @conference {back01,
  14782         title = {Traffic Analysis Attacks and Trade-Offs in Anonymity Providing Systems},
  14783         booktitle = {Proceedings of Information Hiding Workshop (IH 2001)},
  14784         year = {2001},
  14785         month = {April},
  14786         pages = {245--257},
  14787         publisher = {Springer-Verlag, LNCS 2137},
  14788         organization = {Springer-Verlag, LNCS 2137},
  14789         abstract = {We discuss problems and trade-offs with systems providing anonymity for web browsing (or more generally any communication system that requires low latency interaction). We focus on two main systems: the Freedom network [12] and PipeNet [8]. Although Freedom is efficient and reasonably secure against denial of service attacks, it is vulnerable to some generic traffic analysis attacks, which we describe. On the other hand, we look at PipeNet, a simple theoretical model which protects against the traffic analysis attacks we point out, but is vulnerable to denial of service attacks and has efficiency problems. In light of these observations, we discuss the trade-offs that one faces when trying to construct an efficient low latency communication system that protects users' anonymity},
  14790         www_section = {anonymity, Freedom, latency, Pipenet},
  14791         isbn = {978-3-540-42733-9},
  14792         doi = {10.1007/3-540-45496-9},
  14793         url = {http://www.springerlink.com/content/4gpwtejkkvadcdcm/},
  14794         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/traffic_0.pdf},
  14795         author = {Adam Back and Ulf M{\"o}ller and Anton Stiglic},
  14796         editor = {Ira S. Moskowitz}
  14797 }
  14798 @conference {shuffle:ccs01,
  14799         title = {A Verifiable Secret Shuffle and its Application to E-Voting},
  14800         booktitle = {Proceedings of the 8th ACM Conference on Computer and Communications Security (CCS 2001)},
  14801         year = {2001},
  14802         month = {November},
  14803         pages = {116--125},
  14804         publisher = {ACM Press},
  14805         organization = {ACM Press},
  14806         abstract = {We present a mathematical construct which provides a cryptographic protocol to verifiably shuffle a sequence of k modular integers, and discuss its application to secure, universally verifiable, multi-authority election schemes. The output of the shuffle operation is another sequence of k modular integers, each of which is the same secret power of a corresponding input element, but the order of elements in the output is kept secret. Though it is a trivial matter for the "shuffler" (who chooses the permutation of the elements to be applied) to compute the output from the input, the construction is important because it provides a linear size proof of correctness for the output sequence (i.e. a proof that it is of the form claimed) that can be checked by arbitrary verifiers. The complexity of the protocol improves on that of Furukawa-Sako [16] both measured by number of exponentiations and by overall size. The protocol is shown to be honest-verifier zero-knowledge in a special case, and is computational zero-knowledge in general. On the way to the final result, we also construct a generalization of the well known Chaum-Pedersen protocol for knowledge of discrete logarithm equality [10], [7]. In fact, the generalization specializes exactly to the Chaum-Pedersen protocol in the case k = 2. This result may be of interest on its own. An application to electronic voting is given that matches the features of the best current protocols with significant efficiency improvements. An alternative application to electronic voting is also given that introduces an entirely new paradigm for achieving Universally Verifiable elections},
  14807         www_section = {discrete logarithm, multi-authority},
  14808         isbn = {1-58113-385-5},
  14809         doi = {10.1145/501983.502000},
  14810         url = {http://portal.acm.org/citation.cfm?id=502000},
  14811         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shuffle-ccs01.pdf},
  14812         author = {Andrew C. Neff},
  14813         editor = {Pierangela Samarati}
  14814 }
  14815 @booklet {Heydon01thevesta,
  14816         title = {The Vesta Approach to Software Configuration Management},
  14817         year = {2001},
  14818         abstract = {Vesta is a system for software configuration management. It stores collections of source files, keeps track of which versions of which files go together, and automates the process of building a complete software artifact from its component pieces. Vesta's novel approach gives it three important properties. First, every build is repeatable, because its component sources and build tools are stored immutably and immortally, and its configuration description completely specifies what components and tools are used and how they are put together. Second, every build is incremental, because results of previous builds are cached and reused. Third, every build is consistent, because all build dependencies are automatically captured and recorded, so that a cached result from a previous build is reused only when doing so is certain to be correct. In addition, Vesta's flexible language for writing configuration descriptions makes it easy to describe large software configurations in a modular fashion and to create variant configurations by customizing build parameters. This paper gives a brief overview of Vesta, outlining Vesta's advantages over traditional tools, how those benefits are achieved, and the system's overall performance},
  14819         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.23.7370},
  14820         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRC-RR-168.pdf},
  14821         author = {Allan Heydon and Roy Levin and Timothy Mann and Yuan Yu}
  14822 }
  14823 @conference {502054,
  14824         title = {Wide-area cooperative storage with CFS},
  14825         booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems principles},
  14826         year = {2001},
  14827         pages = {202--215},
  14828         publisher = {ACM},
  14829         organization = {ACM},
  14830         address = {New York, NY, USA},
  14831         abstract = {The Cooperative File System (CFS) is a new peer-to-peer read-only storage system that provides provable guarantees for the efficiency, robustness, and load-balance of file storage and retrieval. CFS does this with a completely decentralized architecture that can scale to large systems. CFS servers provide a distributed hash table (DHash) for block storage. CFS clients interpret DHash blocks as a file system. DHash distributes and caches blocks at a fine granularity to achieve load balance, uses replication for robustness, and decreases latency with server selection. DHash finds blocks using the Chord location protocol, which operates in time logarithmic in the number of servers. CFS is implemented using the SFS file system toolkit and runs on Linux, OpenBSD, and FreeBSD. Experience on a globally deployed prototype shows that CFS delivers data to clients as fast as FTP. Controlled tests show that CFS is scalable: with 4,096 servers, looking up a block of data involves contacting only seven servers. The tests also demonstrate nearly perfect robustness and unimpaired performance even when as many as half the servers fail},
  14832         www_section = {P2P},
  14833         isbn = {1-58113-389-8},
  14834         doi = {10.1145/502034.502054},
  14835         url = {http://portal.acm.org/citation.cfm?id=502054$\#$},
  14836         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cfs_sosp.pdf},
  14837         author = {Dabek, Frank and Frans M. Kaashoek and David Karger and Robert Morris and Ion Stoica}
  14838 }
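% In the CFS entry above, DHash places each block on a server chosen by consistent
% hashing and Chord locates it in O(log N) hops. The sketch below shows only the
% placement rule (successor of the block ID on a hash ring); it does a local binary
% search over the full membership list instead of Chord's finger-table routing, and
% the names are ours.
%
% import hashlib
% from bisect import bisect_right
%
% def h(x: str) -> int:
%     return int.from_bytes(hashlib.sha1(x.encode()).digest(), "big")
%
% class Ring:
%     """A block lives on the first server clockwise of its identifier."""
%     def __init__(self, servers):
%         self.ring = sorted((h(s), s) for s in servers)
%     def successor(self, block_id):
%         keys = [k for k, _ in self.ring]
%         i = bisect_right(keys, h(block_id)) % len(self.ring)
%         return self.ring[i][1]
%
% ring = Ring([f"server{i}" for i in range(8)])
% print(ring.successor("block:4f2a"))        # the server responsible for this block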
  14839 @conference {Heimbigner00adaptingpublish/subscribe,
  14840         title = {Adapting Publish/Subscribe Middleware to Achieve Gnutella-like Functionality},
  14841         booktitle = {In Proc. of SAC},
  14842         year = {2000},
  14843         pages = {176--181},
  14844         abstract = {Gnutella represents a new wave of peer-to-peer applications providing distributed discovery and sharing of resources across the Internet. Gnutella is distinguished by its support for anonymity and by its decentralized architecture. The current Gnutella architecture and protocol have numerous flaws with respect to efficiency, anonymity, and vulnerability to malicious actions. An alternative design is described that provides Gnutella-like functionality but removes or mitigates many of Gnutella's flaws. This design, referred to as Query/Advertise (Q/A), is based upon a scalable publish/subscribe middleware system called Siena. A prototype implementation of Q/A is described. The relative benefits of this approach are discussed, and a number of open research problems are identified with respect to Q/A systems},
  14845         url = {http://serl.cs.colorado.edu/~serl/papers/CU-CS-909-00.pdf},
  14846         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CU-CS-909-00.pdf},
  14847         author = {Dennis Heimbigner}
  14848 }
  14849 @booklet {terminology,
  14850         title = {Anonymity, Unobservability, and Pseudonymity: A Consolidated Proposal for Terminology},
  14851         year = {2000},
  14852         month = {July},
  14853         abstract = {Based on the nomenclature of the early papers in the field, we propose a terminology which is both expressive and precise. More particularly, we define anonymity, unlinkability, unobservability, pseudonymity (pseudonyms and digital pseudonyms, and their attributes), and identity management. In addition, we describe the relationships between these terms, give a rationale for why we define them as we do, and sketch the main mechanisms to provide for the properties defined},
  14854         url = {http://dud.inf.tu-dresden.de/Anon_Terminology.shtml},
  14855         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/terminology.pdf},
  14856         author = {Andreas Pfitzmann and Marit Hansen}
  14857 }
  14858 @conference {DBLP:conf/diau/PfitzmannK00,
  14859         title = {Anonymity, Unobservability, and Pseudonymity--A Proposal for Terminology},
  14860         booktitle = {Workshop on Design Issues in Anonymity and Unobservability},
  14861         year = {2000},
  14862         pages = {1--9},
  14863         url = {https://bibliography.gnunet.org},
  14864         author = {Andreas Pfitzmann and Marit K{\"o}hntopp}
  14865 }
  14866 @conference {mitkuro,
  14867         title = {Attack for Flash MIX},
  14868         booktitle = {Proceedings of ASIACRYPT 2000},
  14869         year = {2000},
  14870         publisher = {Springer-Verlag, LNCS 1976},
  14871         organization = {Springer-Verlag, LNCS 1976},
  14872         abstract = {A MIX net takes a list of ciphertexts $(c_1, \ldots, c_N)$ and outputs a permuted list of the plaintexts $(m_1, \ldots, m_N)$ without revealing the relationship between $(c_1, \ldots, c_N)$ and $(m_1, \ldots, m_N)$. This paper shows that Jakobsson's flash MIX of PODC'99, which was believed to be the most efficient robust MIX net, is broken. The first MIX server can prevent computing the correct output with probability 1 in our attack. We also present a countermeasure for our attack},
  14873         isbn = {3-540-41404-5},
  14874         doi = {10.1007/3-540-44448-3_15},
  14875         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.20.6972},
  14876         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.20.6972.pdf},
  14877         author = {Masashi Mitomo and Kaoru Kurosawa}
  14878 }
  14879 @conference {rao-pseudonymity,
  14880         title = {Can Pseudonymity Really Guarantee Privacy?},
  14881         booktitle = {Proceedings of the 9th USENIX Security Symposium},
  14882         year = {2000},
  14883         month = {August},
  14884         pages = {85--96},
  14885         publisher = {USENIX},
  14886         organization = {USENIX},
  14887         abstract = {One of the core challenges facing the Internet today is the problem of ensuring privacy for its users. It is believed that mechanisms such as anonymity and pseudonymity are essential building blocks in formulating solutions to address these challenges and considerable effort has been devoted towards realizing these primitives in practice. The focus of this effort, however, has mostly been on hiding explicit identity information (such as source addresses) by employing a combination of anonymizing proxies, cryptographic techniques to distribute trust among them and traffic shaping techniques to defeat traffic analysis. We claim that such approaches ignore a significant amount of identifying information about the source that leaks from the contents of web traffic itself. In this paper, we demonstrate the significance and value of such information by showing how techniques from linguistics and stylometry can use this information to compromise pseudonymity in several important settings. We discuss the severity of this problem and suggest possible countermeasures},
  14888         www_section = {anonymity, pseudonym},
  14889         url = {http://portal.acm.org/citation.cfm?id=1251313},
  14890         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rao.pdf},
  14891         author = {Josyula R. Rao and Pankaj Rohatgi}
  14892 }
  14893 @conference {339337,
  14894         title = {A case for end system multicast (keynote address)},
  14895         booktitle = {SIGMETRICS '00: Proceedings of the 2000 ACM SIGMETRICS international conference on Measurement and modeling of computer systems},
  14896         year = {2000},
  14897         month = jun,
  14898         pages = {1--12},
  14899         publisher = {ACM},
  14900         organization = {ACM},
  14901         address = {Santa Clara, CA},
  14902         abstract = {The conventional wisdom has been that IP is the natural protocol layer for implementing multicast related functionality. However, ten years after its initial proposal, IP Multicast is still plagued with concerns pertaining to scalability, network management, deployment and support for higher layer functionality such as error, flow and congestion control. In this paper, we explore an alternative architecture for small and sparse groups, where end systems implement all multicast related functionality including membership management and packet replication. We call such a scheme End System Multicast. This shifting of multicast support from routers to end systems has the potential to address most problems associated with IP Multicast. However, the key concern is the performance penalty associated with such a model. In particular, End System Multicast introduces duplicate packets on physical links and incurs larger end-to-end delay than IP Multicast. In this paper, we study this question in the context of the Narada protocol. In Narada, end systems self-organize into an overlay structure using a fully distributed protocol. In addition, Narada attempts to optimize the efficiency of the overlay based on end-to-end measurements. We present details of Narada and evaluate it using both simulation and Internet experiments. Preliminary results are encouraging. In most simulations and Internet experiments, the delay and bandwidth penalty are low. We believe the potential benefits of repartitioning multicast functionality between end systems and routers significantly outweigh the performance penalty incurred},
  14903         www_section = {multicast},
  14904         isbn = {1-58113-194-1},
  14905         doi = {10.1145/339331.339337},
  14906         url = {http://portal.acm.org/citation.cfm?id=339337$\#$},
  14907         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac-2001.pdf},
  14908         author = {Chu, Yang-hua and Rao, Sanjay G. and Zhang, Hui}
  14909 }
  14910 @conference {disad-free-routes,
  14911         title = {The disadvantages of free MIX routes and how to overcome them},
  14912         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  14913         year = {2000},
  14914         month = {July},
  14915         pages = {30--45},
  14916         publisher = {Springer-Verlag, LNCS 2009},
  14917         organization = {Springer-Verlag, LNCS 2009},
  14918         abstract = {There are different methods to build an anonymity service using MIXes. A substantial decision for doing so is the method of choosing the MIX route. In this paper we compare two special configurations: a fixed MIX route used by all participants and a network of freely usable MIXes where each participant chooses his own route. The advantages and disadvantages with respect to the freedom of choice are presented and examined. We'll show that some additional attacks are possible in networks with freely chosen MIX routes. After describing these attacks, we estimate their impact on the achievable degree of anonymity. Finally, we evaluate the relevance of the described attacks with respect to existing systems such as Mixmaster, Crowds, and Freedom},
  14919         www_section = {anonymity, attack},
  14920         isbn = {3-540-41724-9},
  14921         url = {http://portal.acm.org/citation.cfm?id=371975},
  14922         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/disad-free-routes.pdf},
  14923         author = {Oliver Berthold and Andreas Pfitzmann and Ronny Standtke}
  14924 }
  14925 @conference {820485,
  14926         title = {Energy-Efficient Communication Protocol for Wireless Microsensor Networks},
  14927         booktitle = {HICSS '00: Proceedings of the 33rd Hawaii International Conference on System Sciences-Volume 8},
  14928         year = {2000},
  14929         pages = {0--8020},
  14930         publisher = {IEEE Computer Society},
  14931         organization = {IEEE Computer Society},
  14932         address = {Washington, DC, USA},
  14933         abstract = {Wireless distributed micro-sensor systems will enable the reliable monitoring of a variety of environments for both civil and military applications. In this paper, we look at communication protocols, which can have significant impact on the overall energy dissipation of these networks. Based on our findings that the conventional protocols of direct transmission, minimum-transmission-energy, multihop routing, and static clustering may not be optimal for sensor networks, we propose LEACH (Low-Energy Adaptive Clustering Hierarchy), a clustering-based protocol that utilizes randomized rotation of local cluster base stations (cluster-heads) to evenly distribute the energy load among the sensors in the network. LEACH uses localized coordination to enable scalability and robustness for dynamic networks, and incorporates data fusion into the routing protocol to reduce the amount of information that must be transmitted to the base station. Simulations show that LEACH can achieve as much as a factor of 8 reduction in energy dissipation compared with conventional routing protocols. In addition, LEACH is able to distribute energy dissipation evenly throughout the sensors, doubling the useful system lifetime for the networks we simulated},
  14934         www_section = {Low-Energy Adaptive Clustering Hierarchy, mobile Ad-hoc networks, routing, wireless sensor network},
  14935         isbn = {0-7695-0493-0},
  14936         url = {http://portal.acm.org/citation.cfm?id=820485$\#$},
  14937         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.8499.pdf},
  14938         author = {Heinzelman, Wendi Rabiner and Chandrakasan, Anantha and Hari Balakrishnan}
  14939 }
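% The LEACH entry above relies on randomized rotation of cluster heads to spread the
% energy load. The sketch below uses the election threshold commonly attributed to
% LEACH, T = P / (1 - P * (r mod 1/P)) for nodes that have not yet served in the
% current cycle; energy bookkeeping and cluster formation are omitted and the
% parameter names are ours.
%
% import random
%
% def elect_cluster_heads(nodes, P, r, served):
%     """One round of the randomized rotation: recent heads sit out, others volunteer."""
%     T = P / (1 - P * (r % round(1 / P)))
%     return [n for n in nodes if n not in served and random.random() < T]
%
% nodes, served = list(range(100)), set()
% for r in range(40):                          # P = 0.05, i.e. a 20-round rotation cycle
%     heads = elect_cluster_heads(nodes, 0.05, r, served)
%     served.update(heads)
%     if r % 20 == 19:                         # cycle over: every node may serve again
%         served.clear()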
  14940 @conference {514164,
  14941         title = {Enforcing service availability in mobile ad-hoc WANs},
  14942         booktitle = {MobiHoc '00: Proceedings of the 1st ACM international symposium on Mobile ad hoc networking \& computing},
  14943         year = {2000},
  14944         pages = {87--96},
  14945         publisher = {IEEE Press},
  14946         organization = {IEEE Press},
  14947         address = {Piscataway, NJ, USA},
  14948         abstract = {In this paper, we address the problem of service availability in mobile ad-hoc WANs. We present a secure mechanism to stimulate end users to keep their devices turned on, to refrain from overloading the network, and to thwart tampering aimed at converting the device into a "selfish" one. Our solution is based on the application of a tamper resistant security module in each device and cryptographic protection of messages},
  14949         www_section = {ad-hoc networks, cryptography},
  14950         isbn = {0-7803-6534-8},
  14951         url = {http://portal.acm.org/citation.cfm?id=514164},
  14952         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.84.5715.pdf},
  14953         author = {Levente Butty{\'a}n and Jean-Pierre Hubaux}
  14954 }
  14955 @article {339345,
  14956         title = {Feasibility of a serverless distributed file system deployed on an existing set of desktop PCs},
  14957         journal = {SIGMETRICS Performance Evaluation Review},
  14958         volume = {28},
  14959         number = {1},
  14960         year = {2000},
  14961         pages = {34--43},
  14962         publisher = {ACM},
  14963         address = {New York, NY, USA},
  14964         abstract = {We consider an architecture for a serverless distributed file system that does not assume mutual trust among the client computers. The system provides security, availability, and reliability by distributing multiple encrypted replicas of each file among the client machines. To assess the feasibility of deploying this system on an existing desktop infrastructure, we measure and analyze a large set of client machines in a commercial environment. In particular, we measure and report results on disk usage and content; file activity; and machine uptimes, lifetimes, and loads. We conclude that the measured desktop infrastructure would passably support our proposed system, providing availability on the order of one unfilled file request per user per thousand days},
  14965         www_section = {analytical modeling, availability, feasibility analysis, personal computer usage data, reliability, serverless distributed file system architecture, trust, workload characterization},
  14966         issn = {0163-5999},
  14967         doi = {10.1145/345063.339345},
  14968         url = {http://portal.acm.org/citation.cfm?id=345063.339345$\#$},
  14969         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.4280_0.pdf},
  14970         author = {Bolosky, William J. and John R. Douceur and Ely, David and Marvin Theimer}
  14971 }
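% The feasibility study above argues that replicating encrypted files across ordinary
% desktops can yield high availability. A one-line illustration of why; the uptime and
% replica count here are made-up values, not the paper's measurements.
%
% # With independent host availability u, a file kept as k replicas is unreachable
% # only when all k hosts are down at once: (1 - u) ** k.
% u, k = 0.80, 3
% print(f"unavailability with {k} replicas: {(1 - u) ** k:.3%}")   # prints 0.800%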
  14972 @conference {Pei00fisheyestate,
  14973         title = {Fisheye State Routing in Mobile Ad Hoc Networks},
  14974         booktitle = {In ICDCS Workshop on Wireless Networks and Mobile Computing},
  14975         year = {2000},
  14976         pages = {71--78},
  14977         abstract = {In this paper, we present a novel routing protocol for wireless ad hoc networks -- Fisheye State Routing (FSR). FSR introduces the notion of multi-level fisheye scope to reduce routing update overhead in large networks. Nodes exchange link state entries with their neighbors with a frequency which depends on distance to destination. From link state entries, nodes construct the topology map of the entire network and compute optimal routes. Simulation experiments show that FSR is a simple, efficient and scalable routing solution in a mobile, ad hoc environment},
  14978         www_section = {mobile Ad-hoc networks},
  14979         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.6730},
  14980         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/05_75_fisheye-state-routing-in_0.pdf},
  14981         author = {Guangyu Pei and Mario Gerla and Tsu-Wei Chen}
  14982 }
  14983 @conference {freehaven-berk,
  14984         title = {The Free Haven Project: Distributed Anonymous Storage Service},
  14985         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  14986         year = {2000},
  14987         month = {July},
  14988         publisher = {Springer-Verlag, LNCS 2009},
  14989         organization = {Springer-Verlag, LNCS 2009},
  14990         abstract = {We present a design for a system of anonymous storage which resists the attempts of powerful adversaries to find or destroy any stored data. We enumerate distinct notions of anonymity for each party in the system, and suggest a way to classify anonymous systems based on the kinds of anonymity provided. Our design ensures the availability of each document for a publisher-specified lifetime. A reputation system provides server accountability by limiting the damage caused from misbehaving servers. We identify attacks and defenses against anonymous storage services, and close with a list of problems which are currently unsolved},
  14991         www_section = {accountability, anonymity, anonymous publishing},
  14992         isbn = {978-3-540-41724-8},
  14993         doi = {10.1007/3-540-44702-4},
  14994         url = {http://www.springerlink.com/content/uh3mbw5m6u6xt24v/},
  14995         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freehaven_pres.pdf},
  14996         author = {Roger Dingledine and Michael J. Freedman and David Molnar}
  14997 }
  14998 @booklet {freedom2-arch,
  14999         title = {Freedom Systems 2.0 Architecture},
  15000         year = {2000},
  15001         month = {December},
  15002         publisher = {Zero Knowledge Systems, {Inc.}},
  15003         type = {White Paper},
  15004         abstract = {This white paper, targeted at the technically savvy reader, offers a detailed look at the Freedom 2.0 System architecture. It is intended to give the reader a good understanding of the components that make up this system and the relationships between them, as well as to encourage analysis of the system},
  15005         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freedom2-arch.pdf},
  15006         url = {https://bibliography.gnunet.org},
  15007         author = {Philippe Boucher and Adam Shostack and Ian Goldberg}
  15008 }
  15009 @conference {Clarke00freenet:a_0,
  15010         title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System},
  15011         booktitle = {Designing Privacy Enhancing Technologies: International Workshop on Design Issues in Anonymity and Unobservability, Proceedings},
  15012         year = {2000},
  15013         month = jul,
  15014         pages = {46--66},
  15015         address = {Berkeley, CA, USA},
  15016         abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and difficult for a node operator to determine or be held responsible for the actual physical contents of her own node},
  15017         url = {http://www.ecse.rpi.edu/Homepages/shivkuma/teaching/sp2001/readings/freenet.pdf},
  15018         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freenet.pdf},
  15019         author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}
  15020 }
  15021 @conference {freenet,
  15022         title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System},
  15023         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  15024         year = {2000},
  15025         month = {July},
  15026         pages = {46--66},
  15027         abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and difficult for a node operator to determine or be held responsible for the actual physical contents of her own node},
  15028         www_section = {anonymity, Freenet, P2P},
  15029         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.4919},
  15030         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.4919_0.pdf},
  15031         author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}
  15032 }
  15033 @conference {Clarke00freenet:a,
  15034         title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System},
  15035         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  15036         year = {2000},
  15037         pages = {46--66},
  15038         abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and difficult for a node operator to determine or be held responsible for the actual physical contents of her own node},
  15039         www_section = {Freenet, P2P},
  15040         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.4919},
  15041         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.4919.pdf},
  15042         author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}
  15043 }
  15044 @conference {desmedt,
  15045         title = {How To Break a Practical MIX and Design a New One},
  15046         booktitle = {Proceedings of EUROCRYPT 2000},
  15047         year = {2000},
  15048         publisher = {Springer-Verlag, LNCS 1803},
  15049         organization = {Springer-Verlag, LNCS 1803},
  15050         abstract = {A MIX net takes a list of ciphertexts $(c_1, \ldots, c_N)$ and outputs a permuted list of the plaintexts $(m_1, \ldots, m_N)$ without revealing the relationship between $(c_1, \ldots, c_N)$ and $(m_1, \ldots, m_N)$. This paper first shows that Jakobsson's MIX net of Eurocrypt'98, which was believed to be resilient and very efficient, is broken. We next propose an efficient t-resilient MIX net with $O(t^2)$ servers in which the cost of each MIX server is $O(N)$. Two new concepts are introduced, existential-honesty and limited-open-verification. They will be useful for distributed computation in general.
  15051 A part of this research was done while the author visited the Tokyo Institute of Technology, March 4--19, 1999. He was then at the University of Wisconsin {\textemdash} Milwaukee},
  15052         www_section = {existential-honesty, limited-open-verification, mix},
  15053         isbn = {978-3-540-67517-4},
  15054         doi = {10.1007/3-540-45539-6},
  15055         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.1814},
  15056         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.29.1814.pdf},
  15057         author = {Yvo Desmedt and Kaoru Kurosawa}
  15058 }
  15059 @conference {hybrid-mix,
  15060         title = {A Length-Invariant Hybrid MIX},
  15061         booktitle = {Proceedings of ASIACRYPT 2000},
  15062         year = {2000},
  15063         publisher = {Springer-Verlag, LNCS 1976},
  15064         organization = {Springer-Verlag, LNCS 1976},
  15065         abstract = {This paper presents a secure and flexible Mix-net that has the following properties; it efficiently handles long plaintexts that exceed the modulus size of underlying public-key encryption as well as very short ones (length-flexible), input ciphertext length is not impacted by the number of mix-servers (length-invariant), and its security in terms of anonymity is proven in a formal way (provably secure). One can also add robustness i.e. it outputs correct results in the presence of corrupt servers. The security is proved in the random oracle model by showing a reduction from breaking the anonymity of our Mix-net to breaking a sort of indistinguishability of the underlying symmetric encryption scheme or solving the Decision Diffie-Hellman problem},
  15066         www_section = {hybrid encryption, mix},
  15067         isbn = {3-540-41404-5},
  15068         doi = {10.1007/3-540-44448-3_14},
  15069         url = {http://portal.acm.org/citation.cfm?id=647096.716874},
  15070         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.87.7718.pdf},
  15071         author = {Miyako Ohkubo and Masayuki Abe}
  15072 }
  15073 @conference {379239,
  15074         title = {OceanStore: an architecture for global-scale persistent storage},
  15075         booktitle = {ASPLOS-IX: Proceedings of the ninth international conference on Architectural support for programming languages and operating systems},
  15076         year = {2000},
  15077         pages = {190--201},
  15078         publisher = {ACM},
  15079         organization = {ACM},
  15080         address = {New York, NY, USA},
  15081         abstract = {OceanStore is a utility infrastructure designed to span the globe and provide continuous access to persistent information. Since this infrastructure is comprised of untrusted servers, data is protected through redundancy and cryptographic techniques. To improve performance, data is allowed to be cached anywhere, anytime. Additionally, monitoring of usage patterns allows adaptation to regional outages and denial of service attacks; monitoring also enhances performance through pro-active movement of data. A prototype implementation is currently under development},
  15082         isbn = {1-58113-317-0},
  15083         doi = {10.1145/378993.379239},
  15084         url = {http://doi.acm.org/10.1145/378993.379239},
  15085         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p190-kubi.pdf},
  15086         author = {John Kubiatowicz and Bindel, David and Chen, Yan and Czerwinski, Steven and Eaton, Patrick and Geels, Dennis and Gummadi, Ramakrishna and Rhea, Sean C. and Weatherspoon, Hakim and Wells, Chris and Ben Y. Zhao}
  15087 }
  15088 @conference {onion-discex00,
  15089         title = {Onion Routing Access Configurations},
  15090         booktitle = {Proceedings of the DARPA Information Survivability Conference and Exposition (DISCEX 2000)},
  15091         volume = {1},
  15092         year = {2000},
  15093         pages = {34--40},
  15094         publisher = {IEEE CS Press},
  15095         organization = {IEEE CS Press},
  15096         abstract = {Onion Routing is an infrastructure for private communication over a public network. It provides anonymous connections that are strongly resistant to both eavesdropping and traffic analysis. Thus it hides not only the data being sent, but who is talking to whom. Onion Routing's anonymous connections are bidirectional and near real-time, and can be used anywhere a socket connection can be used. Proxy aware applications, such as web browsing and e-mail, require no modification to use Onion Routing, and do so through a series of proxies. Other applications, such as remote login, can also use the system without modification. Access to an onion routing network can be configured in a variety of ways depending on the needs, policies, and facilities of those connecting. This paper describes some of these access configurations and also provides a basic overview of Onion Routing and comparisons with related work},
  15097         www_section = {anonymity, privacy, traffic analysis},
  15098         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4633},
  15099         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/onion-discex00.pdf},
  15100         author = {Paul Syverson and Michael Reed and David Goldschlag}
  15101 }
  15102 @conference {Jannotti:2000:ORM:1251229.1251243,
  15103         title = {Overcast: reliable multicasting with an overlay network},
  15104         booktitle = {OSDI'00. Proceedings of the 4th conference on Symposium on Operating System Design \& Implementation},
  15105         series = {OSDI'00},
  15106         year = {2000},
  15107         month = oct,
  15108         pages = {14--14},
  15109         publisher = {USENIX Association},
  15110         organization = {USENIX Association},
  15111         address = {San Diego, California, USA},
  15112         abstract = {Overcast is an application-level multicasting system that can be incrementally deployed using today's Internet infrastructure. These properties stem from Overcast's implementation as an overlay network. An overlay network consists of a collection of nodes placed at strategic locations in an existing network fabric. These nodes implement a network abstraction on top of the network provided by the underlying substrate network.
  15113 
  15114 Overcast provides scalable and reliable single-source multicast using a simple protocol for building efficient data distribution trees that adapt to changing network conditions. To support fast joins, Overcast implements a new protocol for efficiently tracking the global status of a changing distribution tree.
  15115 
  15116 Results based on simulations confirm that Overcast provides its added functionality while performing competitively with IP Multicast. Simulations indicate that Overcast quickly builds bandwidth-efficient distribution trees that, compared to IP Multicast, provide 70\%-100\% of the total bandwidth possible, at a cost of somewhat less than twice the network load. In addition, Overcast adapts quickly to changes caused by the addition of new nodes or the failure of existing nodes without causing undue load on the multicast source},
  15117         www_section = {overcast, overlay network},
  15118         url = {http://dl.acm.org/citation.cfm?id=1251229.1251243},
  15119         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Overcast.pdf},
  15120         author = {Jannotti, John and Gifford, David K. and Johnson, Kirk L. and Frans M. Kaashoek and O'Toole Jr., James W.}
  15121 }
  15122 @conference {Song00practicaltechniques,
  15123         title = {Practical Techniques for Searches on Encrypted Data},
  15124         booktitle = {Proceedings of the 2000 IEEE Symposium on Security and Privacy (S\&P 2000)},
  15125         year = {2000},
  15126         month = jan,
  15127         address = {Berkeley, CA, USA},
  15128         abstract = {It is desirable to store data on data storage servers such as mail servers and file servers in encrypted form to reduce security and privacy risks. But this usually implies that one has to sacrifice functionality for security. For example, if a client wishes to retrieve only documents containing certain words, it was not previously known how to let the data storage server perform the search and answer the query without loss of data confidentiality},
  15129         isbn = {0-7695-0665-8},
  15130         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/encrypteddata.pdf},
  15131         url = {https://bibliography.gnunet.org},
  15132         author = {Dawn Xiaodong Song and David Wagner and Adrian Perrig}
  15133 }
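% The Song/Wagner/Perrig entry above is about letting a server search encrypted data
% without learning its contents. The sketch below is NOT their construction; it
% illustrates the simpler deterministic-tag idea (the server matches keyed keyword
% tags without seeing plaintext), which leaks search and access patterns. Key handling
% and the encryption of the document bodies are elided, and all names are ours.
%
% import hashlib
% import hmac
%
% KEY = b"k" * 32            # toy key known only to the client
%
% def tag(word: str) -> str:
%     """Deterministic keyword tag the server can match without learning the word."""
%     return hmac.new(KEY, word.lower().encode(), hashlib.sha256).hexdigest()
%
% # Client: upload tags alongside the (separately encrypted) document bodies.
% docs = {"d1": "meet at noon", "d2": "budget draft"}
% index = {doc_id: {tag(w) for w in text.split()} for doc_id, text in docs.items()}
%
% # Server: match a client-supplied trapdoor against the stored tags.
% def search(trapdoor):
%     return [d for d, tags in index.items() if trapdoor in tags]
%
% print(search(tag("noon")))   # ['d1']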
  15134 @conference {Shields00aprotocol,
  15135         title = {A Protocol for Anonymous Communication Over the Internet},
  15136         booktitle = {In ACM Conference on Computer and Communications Security},
  15137         year = {2000},
  15138         pages = {33--42},
  15139         publisher = {ACM Press},
  15140         organization = {ACM Press},
  15141         abstract = {This paper presents a new protocol for initiator anonymity called Hordes, which uses forwarding mechanisms similar to those used in previous protocols for sending data, but is the first protocol to make use of the anonymity inherent in multicast routing to receive data. We show this results in shorter transmission latencies and requires less work of the protocol participants, in terms of the messages processed. We also present a comparison of the security and anonymity of Hordes with previous protocols, using the first quantitative definition of anonymity and unlinkability. Our analysis shows that Hordes provides anonymity in a degree similar to that of Crowds and Onion Routing, but also that Hordes has numerous performance advantages},
  15142         www_section = {Hordes, multicast},
  15143         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.37.3890\&rep=rep1\&type=url\&i=0},
  15144         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hordes-final-all.dvi_.pdf},
  15145         author = {Clay Shields and Brian Neil Levine}
  15146 }
  15147 @phdthesis {ian-thesis,
  15148         title = {A Pseudonymous Communications Infrastructure for the Internet},
  15149         year = {2000},
  15150         month = {December},
  15151         school = {UC Berkeley},
  15152         type = {PhD thesis},
  15153         abstract = {As more and more of people's everyday activities are being conducted online, there is an ever-increasing threat to personal privacy. Every communicative or commercial transaction you perform online reveals bits of information about you that can be compiled into large dossiers, often without your permission, or even your knowledge},
  15154         www_section = {pseudonym},
  15155         isbn = {0-493-10500-X},
  15156         url = {http://portal.acm.org/citation.cfm?id=933285},
  15157         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.3353.pdf},
  15158         author = {Ian Goldberg}
  15159 }
  15160 @conference {publius,
  15161         title = {Publius: A robust, tamper-evident, censorship-resistant and source-anonymous web publishing system},
  15162         booktitle = {Proceedings of the 9th USENIX Security Symposium},
  15163         year = {2000},
  15164         month = {August},
  15165         pages = {59--72},
  15166         abstract = {We describe a system that we have designed and implemented for publishing content on the web. Our publishing scheme has the property that it is very difficult for any adversary to censor or modify the content. In addition, the identity of the publisher is protected once the content is posted. Our system differs from others in that we provide tools for updating or deleting the published content, and users can browse the content in the normal point and click manner using a standard web browser and a client-side proxy that we provide. All of our code is freely available},
  15167         url = {http://portal.acm.org/citation.cfm?id=1251311},
  15168         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/publius.pdf},
  15169         author = {Marc Waldman and Aviel D. Rubin and Lorrie Cranor}
  15170 }
  15171 @article {Resnick:2000:RS:355112.355122,
  15172         title = {Reputation systems},
  15173         journal = {Communications of the ACM},
  15174         volume = {43},
  15175         year = {2000},
  15176         month = dec,
  15177         pages = {45--48},
  15178         publisher = {ACM},
  15179         address = {New York, NY, USA},
  15180         www_section = {reputation systems},
  15181         issn = {0001-0782},
  15182         doi = {10.1145/355112.355122},
  15183         url = {http://doi.acm.org/10.1145/355112.355122},
  15184         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Communications\%20of\%20the\%20ACM\%20-\%20Reputation\%20Systems.pdf},
  15185         author = {Paul Resnick and Kuwabara, Ko and Zeckhauser, Richard and Eric Friedman}
  15186 }
  15187 @booklet {896561,
  15188         title = {On the Scaling of Feedback Algorithms for Very Large Multicast Groups},
  15189         year = {2000},
  15190         publisher = {University of Mannheim},
  15191         abstract = {Feedback from multicast group members is vital for many multicast protocols. In order to avoid feedback implosion in very large groups, feedback algorithms with well-behaved scaling properties must be chosen. In this paper we analyse the performance of three typical feedback algorithms described in the literature. Apart from the basic trade-off between feedback latency and response duplicates we especially focus on the algorithms' sensitivity to the quality of the group size estimation. Based on this analysis we give recommendations for the choice of well-behaved feedback algorithms that are suitable for very large groups},
  15192         url = {http://portal.acm.org/citation.cfm?id=896561$\#$},
  15193         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fuhrmann2001a.pdf},
  15194         author = {Thomas Fuhrmann}
  15195 }
  15196 @conference {Minsky00setreconciliation,
  15197         title = {Set Reconciliation with Nearly Optimal Communication Complexity},
  15198         booktitle = {International Symposium on Information Theory},
  15199         year = {2000},
  15200         pages = {0--232},
  15201         www_section = {set reconciliation},
  15202         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reconcile.pdf},
  15203         url = {https://bibliography.gnunet.org},
  15204         author = {Yaron Minsky and Ari Trachtenberg and Richard Zippel}
  15205 }
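% The Minsky/Trachtenberg/Zippel entry above reconciles two sets with communication
% proportional to the size of their symmetric difference. The core identity is that
% the ratio of the sets' characteristic polynomials depends only on the differing
% elements; the numeric check below illustrates that identity. Recovering the actual
% differences by rational-function interpolation, as the paper does, is omitted.
%
% P = 10**9 + 7                              # prime modulus for the arithmetic
%
% def char_poly_at(S, x):
%     """Evaluate prod_{s in S} (x - s) mod P, the set's characteristic polynomial."""
%     v = 1
%     for s in S:
%         v = v * (x - s) % P
%     return v
%
% A = {3, 7, 19, 42}
% B = {3, 7, 19, 55}                         # A and B differ in one element each way
% for x in (101, 202, 303):
%     lhs = char_poly_at(A, x) * pow(char_poly_at(B, x), -1, P) % P
%     rhs = char_poly_at(A - B, x) * pow(char_poly_at(B - A, x), -1, P) % P
%     assert lhs == rhs                      # ratio depends only on the symmetric difference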
  15206 @conference {Papadopouli00sevendegrees,
  15207         title = {Seven Degrees of Separation in Mobile Ad Hoc Networks},
  15208         booktitle = {In IEEE GLOBECOM},
  15209         year = {2000},
  15210         pages = {1707--1711},
  15211         abstract = {We present an architecture that enables the sharing of information among mobile, wireless, collaborating hosts that experience intermittent connectivity to the Internet. Participants in the system obtain data objects from Internet-connected servers, cache them and exchange them with others who are interested in them. The system exploits the fact that there is a high locality of information access within a geographic area. It aims to increase the data availability to participants with lost connectivity to the Internet. We discuss the main components of the system and possible applications. Finally, we present simulation results that show that the ad hoc networks can be very effective in distributing popular information},
  15212         www_section = {802.11, file-sharing},
  15213         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.36.5640},
  15214         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/globecom00.pdf},
  15215         author = {Maria Papadopouli and Henning G. Schulzrinne}
  15216 }
  15217 @conference {335325,
  15218         title = {The small-world phenomenon: an algorithm perspective},
  15219         booktitle = {STOC '00: Proceedings of the thirty-second annual ACM symposium on Theory of computing},
  15220         year = {2000},
  15221         pages = {163--170},
  15222         publisher = {ACM},
  15223         organization = {ACM},
  15224         address = {New York, NY, USA},
  15225         abstract = {Long a matter of folklore, the {\textquotedblleft}small-world phenomenon {\textquotedblright} {\textemdash} the principle that we are all linked by short chains of acquaintances {\textemdash} was inaugurated as an area of experimental study in the social sciences through the pioneering work of Stanley Milgram in the 1960's. This work was among the first to make the phenomenon quantitative, allowing people to speak of the {\textquotedblleft}six degrees of separation {\textquotedblright} between any two people in the United States. Since then, a number of network models have been proposed as frameworks in which to study the problem analytically. One of the most refined of these models was formulated in recent work of Watts and Strogatz; their framework provided compelling evidence that the small-world phenomenon is pervasive in a range of networks arising in nature and technology, and a fundamental ingredient in the evolution of the World Wide Web. But existing models are insufficient to explain the striking algorithmic component of Milgram's original findings: that individuals using local information are collectively very effective at actually constructing short paths between two points in a social network. Although recently proposed network models are rich in short paths, we prove that no decentralized algorithm, operating with local information only, can construct short paths in these networks with non-negligible probability. We then define an infinite family of network models that naturally generalizes the Watts-Strogatz model, and show that for one of these models, there is a decentralized algorithm capable of finding short paths with high probability. More generally, we provide a strong characterization of this family of network models, showing that there is in fact a unique model within the family for which decentralized algorithms are effective},
  15226         www_section = {small-world},
  15227         isbn = {1-58113-184-4},
  15228         doi = {10.1145/335305.335325},
  15229         url = {http://portal.acm.org/citation.cfm?id=335325$\#$},
  15230         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swn.pdf},
  15231         author = {Kleinberg, Jon}
  15232 }
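% The Kleinberg entry above shows that greedy routing with purely local information
% finds short paths only when long-range links follow the right distance distribution.
% The sketch below is a one-dimensional (ring) toy version with one harmonically
% distributed long-range contact per node; the grid model, the exponent analysis, and
% the proofs are in the paper, and the constants here are arbitrary.
%
% import random
%
% N = 1024
% def dist(a, b):
%     d = abs(a - b) % N
%     return min(d, N - d)                   # lattice distance on the ring
%
% def long_contact(u):                       # Pr[v] proportional to dist(u, v)^-1
%     weights = [0 if v == u else 1 / dist(u, v) for v in range(N)]
%     return random.choices(range(N), weights=weights)[0]
%
% contacts = {u: [(u - 1) % N, (u + 1) % N, long_contact(u)] for u in range(N)}
%
% def greedy_route(src, dst):
%     hops, cur = 0, src
%     while cur != dst:
%         cur = min(contacts[cur], key=lambda v: dist(v, dst))   # purely local decision
%         hops += 1
%     return hops
%
% trials = [greedy_route(random.randrange(N), random.randrange(N)) for _ in range(50)]
% print(sum(trials) / len(trials))           # average hop count over random pairs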
  15233 @conference {onion-routing:pet2000,
  15234         title = {Towards an Analysis of Onion Routing Security},
  15235         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  15236         year = {2000},
  15237         month = {July},
  15238         pages = {96--114},
  15239         publisher = {Springer-Verlag, LNCS 2009},
  15240         organization = {Springer-Verlag, LNCS 2009},
  15241         abstract = {This paper presents a security analysis of Onion Routing, an application independent infrastructure for traffic-analysis-resistant and anonymous Internet connections. It also includes an overview of the current system design, definitions of security goals and new adversary models},
  15242         www_section = {anonymity, privacy, traffic analysis},
  15243         isbn = {978-3-540-41724-8},
  15244         doi = {10.1007/3-540-44702-4},
  15245         url = {http://portal.acm.org/citation.cfm?id=371981},
  15246         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.34.5547.pdf},
  15247         author = {Paul Syverson and Gene Tsudik and Michael Reed and Carl Landwehr}
  15248 }
  15249 @conference {raymond00,
  15250         title = {Traffic Analysis: Protocols, Attacks, Design Issues, and Open Problems},
  15251         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  15252         year = {2000},
  15253         month = {July},
  15254         pages = {10--29},
  15255         publisher = {Springer-Verlag, LNCS 2009},
  15256         organization = {Springer-Verlag, LNCS 2009},
  15257         abstract = {We present the traffic analysis problem and expose the most important protocols, attacks and design issues. Afterwards, we propose directions for further research. As we are mostly interested in efficient and practical Internet based protocols, most of the emphasis is placed on mix based constructions. The presentation is informal in that no complex definitions and proofs are presented, the aim being more to give a thorough introduction than to present deep new insights},
  15258         www_section = {traffic analysis},
  15259         isbn = {3-540-41724-9},
  15260         doi = {10.1007/3-540-44702-4},
  15261         url = {http://portal.acm.org/citation.cfm?id=371972},
  15262         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raymond00.pdf},
  15263         author = {Jean-Fran{\c c}ois Raymond}
  15264 }
  15265 @booklet {Rivest00trusteconomies,
  15266         title = {Trust Economies in The Free Haven Project},
  15267         year = {2000},
  15268         abstract = {The Free Haven Project aims to deploy a system for distributed data storage which is robust against attempts by powerful adversaries to find and destroy stored data. Free Haven uses a secure mixnet for communication, and it emphasizes distributed, reliable, and anonymous storage over efficient retrieval. We provide a system for building trust between pseudonymous entities, based entirely on records of observed behavior. Modelling these observed behaviors as an economy allows us to draw heavily on previous economic theory, as well as on existing data havens which base their accountability on financial loss. This trust system provides a means of enforcing accountability without sacrificing anonymity},
  15269         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.27.1639\&rep=rep1\&type=pdf},
  15270         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.27.1639\%20\%282\%29.pdf},
  15271         author = {Ron Rivest and Arthur C. Smith and Brian T. Sniffen}
  15272 }
  15273 @book {2000,
  15274         title = {Trust-region methods},
  15275         series = {MPS-SIAM Series on Optimization},
  15276         year = {2000},
  15277         publisher = {Society for Industrial and Applied Mathematics and Mathematical Programming Society},
  15278         organization = {Society for Industrial and Applied Mathematics and Mathematical Programming Society},
  15279         address = {Philadelphia, PA},
  15280         isbn = {978-0-89871-460-9},
  15282         url = {http://books.google.com/books?hl=es\&lr=\&id=5kNC4fqssYQC\&oi=fnd\&pg=PR15\&dq=trust-region+methods\&ots=j1JMMQ3QJY\&sig=ncLlD3mqZ4KEQ1Z9V2qId4rNffo$\#$v=onepage\&q\&f=false},
  15283         author = {Andrew R. Conn and Nicholas I. M. Gould and Philippe L. Toint}
  15284 }
  15285 @conference {web-mix:pet2000,
  15286         title = {Web MIXes: A system for anonymous and unobservable Internet access},
  15287         booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design Issues in Anonymity and Unobservability},
  15288         year = {2000},
  15289         month = {July},
  15290         pages = {115--129},
  15291         publisher = {Springer-Verlag, LNCS 2009},
  15292         organization = {Springer-Verlag, LNCS 2009},
  15293         abstract = {We present the architecture, design issues and functions of a MIX-based system for anonymous and unobservable real-time Internet access. This system prevents traffic analysis as well as flooding attacks. The core technologies include an adaptive, anonymous, time/volume-sliced channel mechanism and a ticket-based authentication mechanism. The system also provides an interface to inform anonymous users about their level of anonymity and unobservability},
  15294         www_section = {anonymity, traffic analysis},
  15295         isbn = {978-3-540-41724-8},
  15296         doi = {10.1007/3-540-44702-4},
  15297         url = {http://portal.acm.org/citation.cfm?id=371983},
  15298         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/web-mix-pet2000.pdf},
  15299         author = {Oliver Berthold and Hannes Federrath and Stefan K{\"o}psell}
  15300 }
  15301 @article {335405,
  15302         title = {XMill: an efficient compressor for XML data},
  15303         journal = {SIGMOD Rec},
  15304         volume = {29},
  15305         number = {2},
  15306         year = {2000},
  15307         pages = {153--164},
  15308         publisher = {ACM},
  15309         address = {New York, NY, USA},
  15310         abstract = {We describe a tool for compressing XML data, with applications in data exchange and archiving, which usually achieves about twice the compression ratio of gzip at roughly the same speed. The compressor, called XMill, incorporates and combines existing compressors in order to apply them to heterogeneous XML data: it uses zlib, the library function for gzip, a collection of datatype specific compressors for simple data types, and, possibly, user defined compressors for application specific data types},
  15311         www_section = {compression},
  15312         issn = {0163-5808},
  15313         doi = {10.1145/335191.335405},
  15314         url = {http://portal.acm.org/citation.cfm?id=335405$\#$},
  15315         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.33.2632.pdf},
  15316         author = {Liefke, Hartmut and Suciu, Dan}
  15317 }
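% XMill's gain, per the entry above, comes from regrouping XML text into homogeneous
% containers and feeding each to zlib. The toy comparison below contrasts compressing
% field values in document order against the grouped layout; the real tool adds
% path-based grouping and typed sub-compressors, and the sample data is invented. It
% typically prints a smaller size for the grouped form.
%
% import zlib
%
% rows = [("2023-01-%02d" % (i % 28 + 1), str(1000 + i), "OK") for i in range(500)]
%
% interleaved = "".join("".join(r) for r in rows).encode()                    # document order
% grouped = "".join("".join(r[i] for r in rows) for i in range(3)).encode()   # one container per field
%
% print(len(zlib.compress(interleaved)), len(zlib.compress(grouped)))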
  15318 @article {xor-trees,
  15319         title = {Xor-trees for efficient anonymous multicast and reception},
  15320         journal = {ACM Trans. Inf. Syst. Secur},
  15321         volume = {3},
  15322         number = {2},
  15323         year = {2000},
  15324         pages = {63--84},
  15325         publisher = {ACM Press},
  15326         address = {New York, NY, USA},
  15327         abstract = {In this work we examine the problem of efficient anonymous broadcast and reception in general communication networks. We show an algorithm which achieves anonymous communication with O(1) amortized communication complexity on each link and low computational complexity. In contrast, all previous solutions require polynomial (in the size of the network and security parameter) amortized communication complexity},
  15328         www_section = {anonymity, anonymous multicast, communication complexity},
  15329         isbn = {978-3-540-63384-6},
  15330         issn = {1094-9224},
  15331         doi = {10.1145/354876.354877},
  15332         url = {http://portal.acm.org/citation.cfm?id=354876.354877},
  15333         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.73.6464.pdf},
  15334         author = {Shlomi Dolev and Rafail Ostrovsky}
  15335 }
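% The Dolev/Ostrovsky entry above builds anonymous multicast from XOR operations
% arranged along trees. The sketch below shows only the underlying DC-net style
% cancellation primitive (pairwise pads XOR away, leaving the message and hiding the
% sender); the tree organization and the O(1) amortized-cost argument are the paper's
% contribution, and the parameters here are toy values.
%
% import secrets
% from functools import reduce
% from itertools import combinations
%
% N, MSG_LEN = 5, 16
% xor = lambda a, b: bytes(x ^ y for x, y in zip(a, b))
%
% # Every pair of participants shares a random pad, set up out of band.
% pads = {frozenset(pair): secrets.token_bytes(MSG_LEN) for pair in combinations(range(N), 2)}
%
% def contribution(i, message=None):
%     """Participant i XORs all pads it shares; the anonymous sender also XORs the message."""
%     out = reduce(xor, (pads[frozenset((i, j))] for j in range(N) if j != i))
%     return xor(out, message) if message else out
%
% sender, message = 2, b"anonymous hello!"                 # exactly MSG_LEN bytes
% outputs = [contribution(i, message if i == sender else None) for i in range(N)]
% assert reduce(xor, outputs) == message                   # each pad occurs twice and cancels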
  15336 @conference {Nisan:1999:AMD:301250.301287,
  15337         title = {Algorithmic Mechanism Design},
  15338         booktitle = {STOC'99. Proceedings of the thirty-first Annual ACM Symposium On Theory of Computing},
  15339         series = {STOC '99},
  15340         year = {1999},
  15341         month = may,
  15342         pages = {129--140},
  15343         publisher = {ACM},
  15344         organization = {ACM},
  15345         address = {Atlanta, Georgia, USA},
  15346         abstract = {We consider algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents' interests are best served by behaving correctly. Following notions from the field of mechanism design, we suggest a framework for studying such algorithms. Our main technical contribution concerns the study of a representative task scheduling problem for which the standard mechanism design tools do not suffice},
  15347         www_section = {algorithms, mechanism design},
  15348         isbn = {1-58113-067-8},
  15349         doi = {10.1145/301250.301287},
  15350         url = {http://doi.acm.org/10.1145/301250.301287},
  15351         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2799\%20-\%20Nisan\%20\%26\%20Ronen\%20-\%20Algorithmic\%20mechanism\%20design.pdf},
  15352         author = {Nisan, Noam and Ronen, Amir}
  15353 }
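% The Nisan-Ronen entry above studies algorithms whose participants act in their own
% self-interest. The canonical building block from mechanism design is the Vickrey
% (second-price) rule, under which truthful bidding is a dominant strategy; the
% paper's task-scheduling mechanisms generalize this idea. A minimal sketch with
% invented agent names:
%
% def vickrey(bids):
%     """Highest bidder wins but pays the second-highest bid."""
%     ranked = sorted(bids.items(), key=lambda kv: kv[1], reverse=True)
%     winner = ranked[0][0]
%     price = ranked[1][1] if len(ranked) > 1 else 0
%     return winner, price
%
% print(vickrey({"agent_a": 10, "agent_b": 7, "agent_c": 3}))   # ('agent_a', 7)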
  15354 @conference {Nisan:1999:ASA:1764891.1764893,
  15355         title = {Algorithms for Selfish Agents},
  15356         booktitle = {STACS'99. Symposium on Theoretical Aspects of Computer Science},
  15357         series = {STACS'99},
  15358         year = {1999},
  15359         month = mar,
  15360         pages = {1--15},
  15361         publisher = {Springer-Verlag},
  15362         organization = {Springer-Verlag},
  15363         address = {Trier, Germany},
  15364         abstract = {This paper considers algorithmic problems in a distributed setting where the participants cannot be assumed to follow the algorithm but rather their own self-interest. Such scenarios arise, in particular, when computers or users aim to cooperate or trade over the Internet. As such participants, termed agents, are capable of manipulating the algorithm, the algorithm designer should ensure in advance that the agents' interests are best served by behaving correctly.
  15365 
  15366 This exposition presents a model to formally study such algorithms. This model, based on the field of mechanism design, is taken from the author's joint work with Amir Ronen, and is similar to approaches taken in the distributed AI community in recent years. Using this model, we demonstrate how some of the techniques of mechanism design can be applied towards distributed computation problems. We then exhibit some issues that arise in distributed computation which require going beyond the existing theory of mechanism design},
  15367         www_section = {algorithms, mechanism design, selfish agent},
  15368         isbn = {3-540-65691-X},
  15369         url = {http://dl.acm.org/citation.cfm?id=1764891.1764893},
  15370         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STACS\%2799\%20-\%20Nisan\%20-\%20Algorithms\%20for\%20selfish\%20agents.pdf},
  15371         author = {Nisan, Noam}
  15372 }
  15373 @article {338955,
  15374         title = {Ant algorithms for discrete optimization},
  15375         journal = {Artif. Life},
  15376         volume = {5},
  15377         number = {2},
  15378         year = {1999},
  15379         pages = {137--172},
  15380         publisher = {MIT Press},
  15381         address = {Cambridge, MA, USA},
  15382         abstract = {This article presents an overview of recent work on ant algorithms, that is, algorithms for discrete optimization that took inspiration from the observation of ant colonies' foraging behavior, and introduces the ant colony optimization (ACO) metaheuristic. In the first part of the article the basic biological findings on real ants are reviewed and their artificial counterparts as well as the ACO metaheuristic are defined. In the second part of the article a number of applications of ACO algorithms to combinatorial optimization and routing in communications networks are described. We conclude with a discussion of related work and of some of the most important aspects of the ACO metaheuristic},
  15383         www_section = {ant colony optimization, metaheuristics, natural computation, swarm intelligence},
  15384         issn = {1064-5462},
  15385         doi = {10.1162/106454699568728},
  15386         url = {http://portal.acm.org/citation.cfm?id=338955$\#$},
  15387         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ij_23-alife99.pdf},
  15388         author = {Dorigo, Marco and Di Caro, Gianni and Gambardella, Luca M.}
  15389 }
  15390 @conference {1039861,
  15391         title = {Burt: The Backup and Recovery Tool},
  15392         booktitle = {LISA '99: Proceedings of the 13th USENIX conference on System administration},
  15393         year = {1999},
  15394         pages = {207--218},
  15395         publisher = {USENIX Association},
  15396         organization = {USENIX Association},
  15397         address = {Berkeley, CA, USA},
  15398         abstract = {Burt is a freely distributed parallel network backup system written at the University of Wisconsin, Madison. It is designed to backup large heterogeneous networks. It uses the Tcl scripting language and standard backup programs like dump(1) and GNUTar to enable backups of a wide variety of data sources, including UNIX and Windows NT workstations, AFS based storage, and others. It also uses Tcl for the creation of the user interface, giving the system administrator great flexibility in customizing the system. Burt supports parallel backups to ensure high backup speeds, and checksums to ensure data integrity. The principal contribution of Burt is that it provides a powerful I/O engine within the context of a flexible scripting language; this combination enables graceful solutions to many problems associated with backups of large installations. At our site, we use Burt to backup data from 350 workstations and from our AFS servers, a total of approximately 900 GB every two weeks},
  15399         www_section = {backup},
  15400         url = {http://portal.acm.org/citation.cfm?id=1039861$\#$},
  15401         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.112.7612.pdf},
  15402         author = {Melski, Eric}
  15403 }
  15404 @article {319159,
  15405         title = {Deciding when to forget in the Elephant file system},
  15406         journal = {SIGOPS Oper. Syst. Rev},
  15407         volume = {33},
  15408         number = {5},
  15409         year = {1999},
  15410         pages = {110--123},
  15411         publisher = {ACM},
  15412         address = {New York, NY, USA},
  15413         abstract = {Modern file systems associate the deletion of a file with the immediate release of storage, and file writes with the irrevocable change of file contents. We argue that this behavior is a relic of the past, when disk storage was a scarce resource. Today, large cheap disks make it possible for the file system to protect valuable data from accidental delete or overwrite. This paper describes the design, implementation, and performance of the Elephant file system, which automatically retains all important versions of user files. Users name previous file versions by combining a traditional pathname with a time when the desired version of a file or directory existed. Storage in Elephant is managed by the system using file-grain user-specified retention policies. This approach contrasts with checkpointing file systems such as Plan-9, AFS, and WAFL that periodically generate efficient checkpoints of entire file systems and thus restrict retention to be guided by a single policy for all files within that file system. Elephant is implemented as a new Virtual File System in the FreeBSD kernel},
  15414         www_section = {file systems, storage},
  15415         issn = {0163-5980},
  15416         doi = {10.1145/319344.319159},
  15417         url = {http://portal.acm.org/citation.cfm?id=319159$\#$},
  15418         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p110-santry.pdf},
  15419         author = {Santry, Douglas S. and Feeley, Michael J. and Hutchinson, Norman C. and Veitch, Alistair C. and Carton, Ross W. and Ofir, Jacob}
  15420 }
  15421 @mastersthesis {1999_0,
  15422         title = {A Distributed Decentralized Information Storage and Retrieval System},
  15423         volume = {PhD},
  15424         year = {1999},
  15425         school = {University of Edinburgh},
  15426         abstract = {This report describes an algorithm which, if executed by a group of interconnected nodes, will provide a robust key-indexed information storage and retrieval system with no element of central control or administration. It allows information to be made available to a large group of people in a similar manner to the "World Wide Web". Improvements over this existing system include: no central control or administration required; anonymous information publication and retrieval; dynamic duplication of popular information; and transfer of information location depending upon demand. There is also potential for this system to be used in a modified form as an information publication system within a large organisation which may wish to utilise unused storage space which is distributed across the organisation. The system's reliability is not guaranteed, nor is its efficiency, however the intention is that the efficiency and reliability will be sufficient to make the system useful, and demonstrate that},
  15427         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.32.3665\&rep=rep1\&type=pdf},
  15428         url = {https://bibliography.gnunet.org},
  15429         author = {Ian Clarke}
  15430 }
  15431 @conference {301333,
  15432         title = {Flash mixing},
  15433         booktitle = {PODC '99: Proceedings of the eighteenth annual ACM symposium on Principles of distributed computing},
  15434         year = {1999},
  15435         pages = {83--89},
  15436         publisher = {ACM},
  15437         organization = {ACM},
  15438         address = {New York, NY, USA},
  15439         isbn = {1-58113-099-6},
  15440         doi = {http://doi.acm.org/10.1145/301308.301333},
  15441         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/flash-mix.pdf},
  15442         url = {https://bibliography.gnunet.org},
  15443         author = {Jakobsson, Markus}
  15444 }
  15445 @conference {syverson99,
  15446         title = {Group Principals and the Formalization of Anonymity},
  15447         booktitle = {Proceedings of the World Congress on Formal Methods (1)},
  15448         year = {1999},
  15449         month = jan,
  15450         pages = {814--833},
  15451         abstract = {We introduce the concept of a group principal and present a number of different classes of group principals, including threshold-group-principals. These appear to be naturally useful concepts for looking at security. We provide an associated epistemic language and logic and use it to reason about anonymity protocols and anonymity services, where protection properties are formulated from the intruder's knowledge of group principals. Using our language, we give an epistemic characterization of anonymity properties. We also present a specification of a simple anonymizing system using our theory},
  15452         www_section = {anonymity service},
  15453         isbn = {3-540-66587-0},
  15454         doi = {10.1007/3-540-48119-2},
  15455         url = {http://portal.acm.org/citation.cfm?id=730472},
  15456         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/1999syverson-fm99.pdf},
  15457         author = {Paul Syverson and Stuart Stubblebine}
  15458 }
  15459 @conference {758535,
  15460         title = {New Sequences of Linear Time Erasure Codes Approaching the Channel Capacity},
  15461         booktitle = {AAECC-13: Proceedings of the 13th International Symposium on Applied Algebra, Algebraic Algorithms and Error-Correcting Codes},
  15462         year = {1999},
  15463         pages = {65--76},
  15464         publisher = {Springer-Verlag},
  15465         organization = {Springer-Verlag},
  15466         address = {London, UK},
  15467         abstract = {We will introduce a new class of erasure codes built from irregular bipartite graphs that have linear time encoding and decoding algorithms and can transmit over an erasure channel at rates arbitrarily close to the channel capacity. We also show that these codes are close to optimal with respect to the trade-off between the proximity to the channel capacity and the running time of the recovery algorithm},
  15468         www_section = {coding theory, irregular bipartite graphs, recovery algorithm},
  15469         isbn = {3-540-66723-7},
  15470         url = {http://portal.acm.org/citation.cfm?id=758535\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$},
  15471         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/new_sequences_of_linear_time_erasure_cod_64778.pdf},
  15472         author = {M. Amin Shokrollahi}
  15473 }
  15474 @conference {313556,
  15475         title = {Next century challenges: scalable coordination in sensor networks},
  15476         booktitle = {MobiCom '99: Proceedings of the 5th annual ACM/IEEE international conference on Mobile computing and networking},
  15477         year = {1999},
  15478         pages = {263--270},
  15479         publisher = {ACM},
  15480         organization = {ACM},
  15481         address = {New York, NY, USA},
  15482         abstract = {Networked sensors -- those that coordinate amongst themselves to achieve a larger sensing task -- will revolutionize information gathering and processing both in urban environments and in inhospitable terrain. The sheer numbers of these sensors and the expected dynamics in these environments present unique challenges in the design of unattended autonomous sensor networks. These challenges lead us to hypothesize that sensor network coordination applications may need to be structured differently from traditional network applications. In particular, we believe that localized algorithms (in which simple local node behavior achieves a desired global objective) may be necessary for sensor network coordination. In this paper, we describe localized algorithms, and then discuss directed diffusion, a simple communication model for describing localized algorithms},
  15483         www_section = {sensor networks},
  15484         isbn = {1-58113-142-9},
  15485         doi = {10.1145/313451.313556},
  15486         url = {http://portal.acm.org/citation.cfm?id=313451.313556$\#$},
  15487         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2867.pdf},
  15488         author = {Deborah Estrin and Govindan, Ramesh and Heidemann, John and Kumar, Satish}
  15489 }
  15490 @article {Goldschlag99onionrouting,
  15491         title = {Onion Routing for Anonymous and Private Internet Connections},
  15492         journal = {Communications of the ACM},
  15493         volume = {42},
  15494         year = {1999},
  15495         pages = {39--41},
  15496         abstract = {At the time of this article's publication, the prototype network is processing more than 1 million Web connections per month from more than six thousand IP addresses in twenty countries and in all six main top level domains. [7] Onion Routing operates by dynamically building anonymous connections within a network of real-time Chaum Mixes [3]. A Mix is a store-and-forward device that accepts a number of fixed-length messages from numerous sources, performs cryptographic transformations on the messages, and then forwards the messages to the next destination in a random order. A single Mix makes tracking of a particular message either by specific bit-pattern, size, or ordering with respect to other messages difficult. By routing through numerous Mixes in the network, determining who is talking to whom becomes even more difficult. Onion Routing's network of core onion-routers (Mixes) is distributed, fault-tolerant, and under the control of multiple administrative domains, so no single onion-router can bring down the network or compromise a user's privacy, and cooperation between compromised onion-routers is thereby confounded},
  15497         url = {http://www.onion-router.net/Publications/CACM-1999},
  15498         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/onionrouting.pdf},
  15499         author = {David Goldschlag and Michael Reed and Paul Syverson}
  15500 }
  15501 @conference {1268712,
  15502         title = {Operation-based update propagation in a mobile file system},
  15503         booktitle = {ATEC '99: Proceedings of the annual conference on USENIX Annual Technical Conference},
  15504         year = {1999},
  15505         pages = {4--4},
  15506         publisher = {USENIX Association},
  15507         organization = {USENIX Association},
  15508         address = {Berkeley, CA, USA},
  15509         abstract = {In this paper we describe a technique called operation-based update propagation for efficiently transmitting updates to large files that have been modified on a weakly connected client of a distributed file system. In this technique, modifications are captured above the file-system layer at the client, shipped to a surrogate client that is strongly connected to a server, re-executed at the surrogate, and the resulting files transmitted from the surrogate to the server. If re-execution fails to produce a file identical to the original, the system falls back to shipping the file from the client over the slow network. We have implemented a prototype of this mechanism in the Coda File System on Linux, and demonstrated performance improvements ranging from 40 percent to nearly three orders of magnitude in reduced network traffic and elapsed time. We also found a novel use of forward error correction in this context},
  15510         url = {http://portal.acm.org/citation.cfm?id=1268712$\#$},
  15511         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lee.pdf},
  15512         author = {Lee, Yui-Wah and Leung, Kwong-Sak and Satyanarayanan, Mahadev}
  15513 }
  15514 @conference {1999_1,
  15515         title = {Public-key Cryptosystems Based on Composite Degree Residuosity Classes},
  15516         booktitle = {Proceedings of the 17th International Conference on Theory and Application of Cryptographic Techniques},
  15517         year = {1999},
  15518         publisher = {Springer-Verlag},
  15519         organization = {Springer-Verlag},
  15520         address = {Berlin, Heidelberg},
  15521         abstract = {This paper investigates a novel computational problem, namely the Composite Residuosity Class Problem, and its applications to public-key cryptography. We propose a new trapdoor mechanism and derive from this technique three encryption schemes: a trapdoor permutation and two homomorphic probabilistic encryption schemes computationally comparable to RSA. Our cryptosystems, based on usual modular arithmetics, are provably secure under appropriate assumptions in the standard model},
  15522         isbn = {3-540-65889-0},
  15523         url = {http://dl.acm.org/citation.cfm?id=1756123.1756146},
  15524         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PublicKeyCryptoSystems1999Paillier.pdf},
  15525         author = {Paillier, Pascal}
  15526 }
  15527 @article {RePEc:bla:restud:v:66:y:1999:i:1:p:3-21,
  15528         title = {The Theory of Moral Hazard and Unobservable Behaviour: Part I},
  15529         journal = {Review of Economic Studies},
  15530         volume = {66},
  15531         number = {1},
  15532         year = {1999},
  15533         month = jan,
  15534         pages = {3--21},
  15535         abstract = {This article presents information on principal-agent models in which outcomes conditional on the agent's action are uncertain, and the agent's behavior therefore unobservable. For a model with bounded agent's utility, conditions are given under which the first-best equilibrium can be approximated arbitrarily closely by contracts relating payment to observable outcomes. For general models, it is shown that the solution may not always be obtained by using the agent's first-order conditions as constraint. General conditions of Lagrangean type are given for problems in which contracts are finite-dimensional},
  15536         www_section = {contracts, Lagrangean conditions, unobservability},
  15537         url = {http://econpapers.repec.org/RePEc:bla:restud:v:66:y:1999:i:1:p:3-21},
  15538         author = {Mirrlees, James A.}
  15539 }
  15540 @conference {314722,
  15541         title = {Analysis of random processes via And-Or tree evaluation},
  15542         booktitle = {SODA '98: Proceedings of the ninth annual ACM-SIAM symposium on Discrete algorithms},
  15543         year = {1998},
  15544         pages = {364--373},
  15545         publisher = {Society for Industrial and Applied Mathematics},
  15546         organization = {Society for Industrial and Applied Mathematics},
  15547         address = {Philadelphia, PA, USA},
  15548         abstract = {We introduce a new set of probabilistic analysis tools based on the analysis of And-Or trees with random inputs. These tools provide a unifying, intuitive, and powerful framework for carrying out the analysis of several previously studied random processes of interest, including random loss-resilient codes, solving random k-SAT formula using the pure literal rule, and the greedy algorithm for matchings in random graphs. In addition, these tools allow generalizations of these problems not previously analyzed to be analyzed in a straightforward manner. We illustrate our methodology on the three problems listed above},
  15549         www_section = {And-Or trees, coding theory},
  15550         isbn = {0-89871-410-9},
  15551         url = {http://portal.acm.org/citation.cfm?id=314722\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$},
  15552         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.63.2427.pdf},
  15553         author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi}
  15554 }
  15555 @article {Reed98anonymousconnections,
  15556         title = {Anonymous Connections and Onion Routing},
  15557         journal = {IEEE Journal on Selected Areas in Communications},
  15558         volume = {16},
  15559         year = {1998},
  15560         pages = {482--494},
  15561         abstract = {Onion Routing is an infrastructure for private communication over a public network. It provides anonymous connections that are strongly resistant to both eavesdropping and traffic analysis. Onion routing's anonymous connections are bidirectional and near realtime, and can be used anywhere a socket connection can be used. Any identifying information must be in the data stream carried over an anonymous connection. An onion is a data structure that is treated as the destination address by onion routers; thus, it is used to establish an anonymous connection. Onions themselves appear differently to each onion router as well as to network observers. The same goes for data carried over the connections they establish. Proxy aware applications, such as web browsing and e-mail, require no modification to use onion routing, and do so through a series of proxies. A prototype onion routing network is running between our lab and other sites. This paper describes anonymous connections and their implementation},
  15562         www_section = {anonymity, onion routing},
  15563         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.35.2362},
  15564         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.8267.pdf},
  15565         author = {Michael Reed and Paul Syverson and David Goldschlag}
  15566 }
  15567 @article {Reiter98crowds:anonymity,
  15568         title = {Crowds: Anonymity for web transactions},
  15569         journal = {ACM Transactions on Information and System Security},
  15570         volume = {1},
  15571         year = {1998},
  15572         pages = {66--92},
  15573         abstract = {Crowds is a system that allows anonymous web-surfing. For each host, a random static path through the crowd is formed that then acts as a sequence of proxies, relaying requests and replies. It is vulnerable to adversaries that can perform traffic analysis at the local node and provides no responder anonymity, but it is highly scalable and efficient},
  15574         www_section = {anonymous web browsing, Crowds},
  15575         url = {http://avirubin.com/crowds.pdf},
  15576         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crowds.pdf},
  15577         author = {Michael K. Reiter and Aviel D. Rubin}
  15578 }
  15579 @conference {nym-alias-net,
  15580         title = {The Design, Implementation and Operation of an Email Pseudonym Server},
  15581         booktitle = {Proceedings of the 5th ACM Conference on Computer and Communications Security (CCS 1998)},
  15582         year = {1998},
  15583         month = {November},
  15584         publisher = {ACM Press},
  15585         organization = {ACM Press},
  15586         abstract = {Attacks on servers that provide anonymity generally fall into two categories: attempts to expose anonymous users and attempts to silence them. Much existing work concentrates on withstanding the former, but the threat of the latter is equally real. One particularly effective attack against anonymous servers is to abuse them and stir up enough trouble that they must shut down. This paper describes the design, implementation, and operation of nym.alias.net, a server providing untraceable email aliases. We enumerate many kinds of abuse the system has weathered during two years of operation, and explain the measures we enacted in response. From our experiences, we distill several principles by which one can protect anonymous servers from similar attacks},
  15587         isbn = {1-58113-007-4},
  15588         doi = {10.1145/288090.288098},
  15589         url = {http://portal.acm.org/citation.cfm?id=288098},
  15590         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nym-alias-net.pdf},
  15591         author = {David Mazi{\`e}res and Frans M. Kaashoek}
  15592 }
  15593 @conference {285258,
  15594         title = {A digital fountain approach to reliable distribution of bulk data},
  15595         booktitle = {SIGCOMM'98: Proceedings of SIGCOMM'98 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication},
  15596         year = {1998},
  15597         month = sep,
  15598         pages = {56--67},
  15599         publisher = {ACM},
  15600         organization = {ACM},
  15601         address = {Vancouver, Canada},
  15602         abstract = {The proliferation of applications that must reliably distribute bulk data to a large number of autonomous clients motivates the design of new multicast and broadcast protocols. We describe an ideal, fully scalable protocol for these applications that we call a digital fountain. A digital fountain allows any number of heterogeneous clients to acquire bulk data with optimal efficiency at times of their choosing. Moreover, no feedback channels are needed to ensure reliable delivery, even in the face of high loss rates. We develop a protocol that closely approximates a digital fountain using a new class of erasure codes that for large block sizes are orders of magnitude faster than standard erasure codes. We provide performance measurements that demonstrate the feasibility of our approach and discuss the design, implementation and performance of an experimental system},
  15603         www_section = {coding theory, multicast},
  15604         isbn = {1-58113-003-1},
  15605         doi = {10.1145/285237.285258},
  15606         url = {http://portal.acm.org/citation.cfm?id=285258\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$},
  15607         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.3011.pdf},
  15608         author = {Byers, John W. and Luby, Michael and Michael Mitzenmacher and Rege, Ashutosh}
  15609 }
  15610 @article {Xu98lowdensity,
  15611         title = {Low Density MDS Codes and Factors of Complete Graphs},
  15612         journal = {IEEE Trans. on Information Theory},
  15613         volume = {45},
  15614         year = {1998},
  15615         pages = {1817--1826},
  15616         abstract = {We reveal an equivalence relation between the construction of a new class of low density MDS array codes, that we call B-Code, and a combinatorial problem known as perfect one-factorization of complete graphs. We use known perfect one-factors of complete graphs to create constructions and decoding algorithms for both B-Code and its dual code. B-Code and its dual are optimal in the sense that (i) they are MDS, (ii) they have an optimal encoding property, i.e., the number of the parity bits that are affected by change of a single information bit is minimal and (iii) they have optimal length. The existence of perfect one-factorizations for every complete graph with an even number of nodes is a 35-year-long conjecture in graph theory. The construction of B-codes of arbitrary odd length will provide an affirmative answer to the conjecture},
  15617         www_section = {array codes, low density, MDS Codes, update complexity},
  15618         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.8899},
  15619         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.42.8899.pdf},
  15620         author = {Lihao Xu and Vasken Bohossian and Jehoshua Bruck and David Wagner}
  15621 }
  15622 @article {581193,
  15623         title = {Modelling with Generalized Stochastic Petri Nets},
  15624         journal = {SIGMETRICS Perform. Eval. Rev},
  15625         volume = {26},
  15626         number = {2},
  15627         year = {1998},
  15628         pages = {0--2},
  15629         publisher = {ACM},
  15630         address = {New York, NY, USA},
  15631         issn = {0163-5999},
  15632         doi = {10.1145/288197.581193},
  15633         url = {http://portal.acm.org/citation.cfm?id=581193$\#$},
  15634         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.6433.pdf},
  15635         author = {Marco Ajmone Marsan and Gianfranco Balbo and Gianni Conte and Susanna Donatelli and Giuliana Franceschinis}
  15636 }
  15637 @booklet {pipenet10,
  15638         title = {PipeNet 1.0},
  15639         year = {1998},
  15640         month = jan,
  15641         url = {http://weidai.com/pipenet.txt},
  15642         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/http___freehaven.net_anonbib_cache_pipenet10.html_.pdf},
  15643         author = {Dai, Wei}
  15644 }
  15645 @booklet {citeulike:2549551,
  15646         title = {PipeNet 1.1},
  15647         year = {1998},
  15648         url = {http://www.eskimo.com/~weidai/pipenet.txt},
  15649         author = {Dai, Wei}
  15650 }
  15651 @conference {tau-indy,
  15652         title = {A Random Server Model for Private Information Retrieval or How to Achieve Information Theoretic PIR Avoiding Database Replication},
  15653         booktitle = {Proceedings of the Second International Workshop on Randomization and Approximation Techniques in Computer Science (RANDOM '98)},
  15654         year = {1998},
  15655         pages = {200--217},
  15656         publisher = {Springer-Verlag},
  15657         organization = {Springer-Verlag},
  15658         address = {London, UK},
  15659         abstract = {Private information retrieval (PIR) schemes provide a user with information from a database while keeping his query secret from the database manager. We propose a new model for PIR, utilizing auxiliary random servers providing privacy services for database access. The principal database initially engages in a preprocessing setup computation with the random servers, followed by the on-line stage with the users. Using this model we achieve the first PIR information theoretic solutions in which the database does not need to give away its data to be replicated, and with minimal on-line computation cost for the database. This solves privacy and efficiency problems inherent to all previous solutions. Specifically, in all previously existing PIR schemes the database on-line computation for one query is at least linear in the size of the data, and all previous information theoretic schemes require multiple replications of the database which are not allowed to communicate with each other. This poses a privacy problem for the database manager, who is required to hand his data to multiple foreign entities, and to the user, who is supposed to trust the multiple copies of the database not to communicate. In contrast, in our solutions no replication is needed, and the database manager only needs to perform O(1) amount of computation to answer questions of users, while all the extra computations required on line for privacy are done by the auxiliary random servers, who contain no information about the data},
  15660         www_section = {anonymity, privacy, private information retrieval},
  15661         isbn = {3-540-65142-X},
  15662         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.6742},
  15663         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.18.6742.pdf},
  15664         author = {Yael Gertner and Shafi Goldwasser and Tal Malkin}
  15665 }
  15666 @article {realtime-mix,
  15667         title = {Real-Time MIXes: A Bandwidth-Efficient Anonymity Protocol},
  15668         journal = {IEEE Journal on Selected Areas in Communications},
  15669         volume = {16},
  15670         number = {4},
  15671         year = {1998},
  15672         pages = {495--509 },
  15673         abstract = {We present techniques for efficient anonymous communication with real-time constraints as necessary for services like telephony, where a continuous data stream has to be transmitted. For concreteness, we present the detailed protocols for the narrow-band ISDN (integrated services digital network), although the heart of our techniques (anonymous channels) can also be applied to other networks. For ISDN, we achieve the same data rate as without anonymity, using the same subscriber lines and without any significant modifications to the long-distance network. A precise performance analysis is given. Our techniques are based on mixes, a method for anonymous communication for e-mail-like services introduced by D. Chaum (1981)},
  15674         www_section = {anonymity, performance analysis},
  15675         issn = {0733-8716 },
  15676         doi = {10.1109/49.668973 },
  15677         url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel4\%2F49\%2F14639\%2F00668973.pdf\%3Farnumber\%3D668973\&authDecision=-203},
  15678         author = {Anja Jerichow and Jan M{\"u}ller and Andreas Pfitzmann and Birgit Pfitzmann and Michael Waidner}
  15679 }
  15680 @book {Goldreich98securemulti-party,
  15681         title = {Secure Multi-Party Computation},
  15682         booktitle = {The Foundations of Cryptography },
  15683         volume = {2},
  15684         year = {1998},
  15685         publisher = {Cambridge University Press},
  15686         organization = {Cambridge University Press},
  15687         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.11.2201\&rep=rep1\&type=pdf},
  15688         www_section = unsorted,
  15689         author = {Oded Goldreich}
  15690 }
  15691 @conference {stop-and-go,
  15692         title = {Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System},
  15693         booktitle = {Proceedings of Information Hiding Workshop (IH 1998)},
  15694         year = {1998},
  15695         publisher = {Springer-Verlag, LNCS 1525},
  15696         organization = {Springer-Verlag, LNCS 1525},
  15697         abstract = {Currently known basic anonymity techniques depend on identity verification. If verification of user identities is not possible due to the related management overhead or a general lack of information (e.g. on the Internet), an adversary can participate several times in a communication relationship and observe the honest users. In this paper we focus on the problem of providing anonymity without identity verification. The notion of probabilistic anonymity is introduced. Probabilistic anonymity is based on a publicly known security parameter, which determines the security of the protocol. For probabilistic anonymity the insecurity, expressed as the probability of having only one honest participant, approaches 0 at an exponential rate as the security parameter is changed linearly. Based on our security model we propose a new MIX variant called {\textquotedblleft}Stop-and-Go-MIX{\textquotedblright} (SG-MIX) which provides anonymity without identity verification, and prove that it is probabilistically secure},
  15698         www_section = {anonymity, identity verification, security parameter},
  15699         isbn = {978-3-540-65386-8},
  15700         doi = {10.1007/3-540-49380-8_7},
  15701         url = {http://www.springerlink.com/content/hmfv2mgy1xqbn852/},
  15702         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/stop-and-go.pdf},
  15703         author = {Dogan Kesdogan and Jan Egner and Roland B{\"u}schkes}
  15704 }
  15705 @conference {abe,
  15706         title = {Universally Verifiable mix-net With Verification Work Independent of The Number of mix Servers},
  15707         booktitle = {Proceedings of EUROCRYPT 1998},
  15708         year = {1998},
  15709         publisher = {Springer-Verlag, LNCS 1403},
  15710         organization = {Springer-Verlag, LNCS 1403},
  15711         abstract = {In this paper we construct a universally verifiable Mix-net where the amount of work done by a verifier is independent of the number of mix-servers. Furthermore, the computational task of each mix-server is constant against the number of mix-servers except for some negligible tasks like addition. The scheme is robust, too},
  15712         www_section = {electronic voting, mix, universal verifiability},
  15713         isbn = {978-3-540-64518-4},
  15714         doi = {10.1007/BFb0054144},
  15715         url = {http://www.springerlink.com/content/hl8838u4l9354544/},
  15716         author = {Masayuki Abe}
  15717 }
  15718 @conference {CPIR,
  15719         title = {Computationally private information retrieval (extended abstract)},
  15720         booktitle = {Proceedings of the twenty-ninth annual ACM symposium on Theory of Computing (STOC '97)},
  15721         year = {1997},
  15722         pages = {304--313},
  15723         publisher = {ACM Press},
  15724         organization = {ACM Press},
  15725         address = {El Paso, TX, United States},
  15726         abstract = {Private information retrieval (PIR) schemes enable a user to access k replicated copies of a database (k >= 2), and privately retrieve one of the n bits of data stored in the databases. This means that the queries give each individual database no partial information (in the information theoretic sense) on the identity of the item retrieved by the user. Today, the best two database scheme (k = 2) has communication complexity O(n^{1/3}), while for any constant number, k, the best k database scheme has communication complexity O(n^{1/(2k-1)}). The motivation for the present work is the question whether this complexity can be reduced if one is willing to achieve computational privacy, rather than information theoretic privacy. (This means that privacy is guaranteed only with respect to databases that are restricted to polynomial time computations.) We answer this question affirmatively},
  15727         www_section = {communication complexity, private information retrieval},
  15728         isbn = {0-89791-888-6},
  15729         doi = {http://doi.acm.org/10.1145/258533.258609},
  15730         url = {http://portal.acm.org/citation.cfm?id=258533.258609},
  15731         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chor97computationally.pdf},
  15732         author = {Benny Chor and Niv Gilboa}
  15733 }
  15734 @book {Ogata97faulttolerant,
  15735         title = {Fault Tolerant Anonymous Channel},
  15736         booktitle = {Information and Communications Security},
  15737         series = {Lecture Notes in Computer Science},
  15738         volume = {1334/1997},
  15739         year = {1997},
  15740         pages = {440--444},
  15741         publisher = {Springer Berlin / Heidelberg},
  15742         organization = {Springer Berlin / Heidelberg},
  15743         abstract = {This paper describes a zero-knowledge proof that a mix in onion routing can perform in order to prove that it routed the messages properly. This allows the deployment of a mix-net where malicious mixes can be detected without using dummy traffic to probe for correctness},
  15744         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.357\&rep=rep1\&type=url\&i=0},
  15745         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fault.dvi_.pdf},
  15746         www_section = unsorted,
  15747         author = {Wakaha Ogata and Kaoru Kurosawa and Kazue Sako and Kazunori Takatani}
  15748 }
  15749 @conference {Luby:1997:PLC:258533.258573,
  15750         title = {Practical Loss-Resilient Codes},
  15751         booktitle = {STOC 1997--Proceedings of the 29th annual ACM symposium on Theory of computing},
  15752         series = {STOC '97},
  15753         year = {1997},
  15754         month = may,
  15755         pages = {150--159},
  15756         publisher = {ACM},
  15757         organization = {ACM},
  15758         address = {El Paso, Texas, USA},
  15759         abstract = {We present a randomized construction of linear-time encodable and decodable codes that can transmit over lossy channels at rates extremely close to capacity. The encoding and decoding algorithms for these codes have fast and simple software implementations. Partial implementations of our algorithms are faster by orders of magnitude than the best software implementations of any previous algorithm for this problem. We expect these codes will be extremely useful for applications such as real-time audio and video transmission over the Internet, where lossy channels are common and fast decoding is a requirement. Despite the simplicity of the algorithms, their design and analysis are mathematically intricate. The design requires the careful choice of a random irregular bipartite graph, where the structure of the irregular graph is extremely important. We model the progress of the decoding algorithm by a set of differential equations. The solution to these equations can then be expressed as polynomials in one variable with coefficients determined by the graph structure. Based on these polynomials, we design a graph structure that guarantees successful decoding with high probability},
  15760         www_section = {loss-resilient code},
  15761         isbn = {0-89791-888-6},
  15762         doi = {http://doi.acm.org/10.1145/258533.258573},
  15763         url = {http://doi.acm.org/10.1145/258533.258573},
  15764         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2797\%20-\%20Practical\%20Loss-Resilient\%20Codes.pdf},
  15765         author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A. Spielman and Stemann, Volker}
  15766 }
  15767 @conference {1997_0,
  15768         title = {Privacy-enhancing Technologies for the Internet},
  15769         booktitle = {Compcon '97. Proceedings, IEEE},
  15770         year = {1997},
  15771         month = feb,
  15772         publisher = {IEEE Computer Society},
  15773         organization = {IEEE Computer Society},
  15774         address = {San Jose, CA, United States},
  15775         abstract = {The increased use of the Internet for everyday activities is bringing new threats to personal privacy. This paper gives an overview of existing and potential privacy-enhancing technologies for the Internet, as well as motivation and challenges for future work in this field},
  15776         www_section = {Internet, privacy, privacy-enhancing technology},
  15777         isbn = {0818678046},
  15778         url = {http://www.cs.berkeley.edu/~daw/papers/privacy-compcon97-www/privacy-html.html},
  15779         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Compcon\%20\%2797\%20-\%20Privacy-enhancing\%20Technologies\%20for\%20the\%20Internet.pdf},
  15780         author = {Ian Goldberg and David Wagner and Eric Brewer}
  15781 }
  15782 @article {1997_1,
  15783         title = {A Reliable Multicast Framework for Light-weight Sessions and Application Level Framing},
  15784         journal = {IEEE/ACM Trans. Netw},
  15785         volume = {5},
  15786         year = {1997},
  15787         pages = {784--803},
  15788         abstract = {This paper describes SRM (Scalable Reliable Multicast), a reliable multicast framework for light-weight sessions and application level framing. The algorithms of this framework are efficient, robust, and scale well to both very large networks and very large sessions. The SRM framework has been prototyped in wb, a distributed whiteboard application, which has been used on a global scale with sessions ranging from a few to a few hundred participants. The paper describes the principles that have guided the SRM design, including the IP multicast group delivery model, an end-to-end, receiver-based model of reliability, and the application level framing protocol model. As with unicast communications, the performance of a reliable multicast delivery algorithm depends on the underlying topology and operational environment. We investigate that dependence via analysis and simulation, and demonstrate an adaptive algorithm that uses the results of previous loss recovery events to adapt the control parameters used
  15789 for future loss recovery. With the adaptive algorithm, our reliable multicast delivery algorithm provides good performance over a wide range of underlying topologies},
  15790         www_section = {computer network performance, computer networks, Internetworking},
  15791         issn = {1063-6692},
  15792         doi = {10.1109/90.650139},
  15793         url = {http://dx.doi.org/10.1109/90.650139},
  15794         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Reliable_MultiCast1997Flyod.pdf},
  15795         author = {Floyd, Sally and Jacobson, Van and Liu, Ching-Gung and McCanne, Steven and Zhang, Lixia}
  15796 }
  15797 @article {rewebber,
  15798         title = {TAZ servers and the rewebber network: Enabling anonymous publishing on the world wide web},
  15799         journal = {First Monday},
  15800         volume = {3},
  15801         number = {4},
  15802         year = {1997},
  15803         month = {August},
  15804         abstract = {The World Wide Web has recently matured enough to provide everyday users with an extremely cheap publishing mechanism. However, the current WWW architecture makes it fundamentally difficult to provide content without identifying yourself. We examine the problem of anonymous publication on the WWW, propose a design suitable for practical deployment, and describe our implementation. Some key features of our design include universal accessibility by pre-existing clients, short persistent names, security against social, legal, and political pressure, protection against abuse, and good performance},
  15805         www_section = {anonymous publishing},
  15806         doi = {10.1.1.41.4031},
  15807         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.4031},
  15808         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.41.4031.pdf},
  15809         author = {Ian Goldberg and David Wagner}
  15810 }
  15811 @conference {716407,
  15812         title = {An Empirical Study of Delta Algorithms},
  15813         booktitle = {ICSE '96: Proceedings of the SCM-6 Workshop on System Configuration Management},
  15814         year = {1996},
  15815         pages = {49--66},
  15816         publisher = {Springer-Verlag},
  15817         organization = {Springer-Verlag},
  15818         address = {London, UK},
  15819         abstract = {Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed},
  15820         isbn = {3-540-61964-X},
  15821         doi = {10.1007/BFb0023076},
  15822         url = {http://www.springerlink.com/content/584k258285p18x4g/},
  15823         www_section = unsorted,
  15824         author = {Hunt, James J. and Vo, Kiem-Phong and Tichy, Walter F.}
  15825 }
  15826 @conference {1267576,
  15827         title = {Establishing identity without certification authorities},
  15828         booktitle = {SSYM'96: Proceedings of the 6th conference on USENIX Security Symposium, Focusing on Applications of Cryptography},
  15829         year = {1996},
  15830         pages = {7--7},
  15831         publisher = {USENIX Association},
  15832         organization = {USENIX Association},
  15833         address = {Berkeley, CA, USA},
  15834         abstract = {The thesis of this paper is that a traditional identity certificate is neither necessary nor sufficient for this purpose. It is especially useless if the two parties concerned did not have the foresight to obtain such certificates before desiring to open a secure channel. There are many methods for establishing identity without using certificates from trusted certification authorities. The relationship between verifier and subject guides the choice of method. Many of these relationships have easy, straight-forward methods for binding a public key to an identity, using a broadcast channel or 1:1 meetings, but one relationship makes it especially difficult. That relationship is one with an old friend with whom you had lost touch but who appears now to be available on the net. You make contact and share a few exchanges which suggest to you that this is, indeed, your old friend. Then you want to form a secure channel in order to carry on a more extensive conversation in private. This case is subject to the man-in-the-middle attack. For this case, a protocol is presented which binds a pair of identities to a pair of public keys without using any certificates issued by a trusted CA. The apparent direct conflict between conventional wisdom and the thesis of this paper lies in the definition of the word "identity" -- a word which is commonly left undefined in discussions of certification},
  15835         www_section = {certificate revocation, public key cryptography},
  15836         url = {http://portal.acm.org/citation.cfm?id=1267576$\#$},
  15837         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.31.7263.pdf},
  15838         author = {Ellison, Carl M.}
  15839 }
  15840 @conference {Anderson96theeternity,
  15841         title = {The Eternity Service},
  15842         booktitle = {Pragocrypt'96--Proceedings of the 1st International Conference on the Theory and Applications of Cryptology},
  15843         year = {1996},
  15844         month = sep,
  15845         pages = {242--252},
  15846         address = {Prague, CZ},
  15847         abstract = {The Internet was designed to provide a communications channel that is as resistant to denial of service attacks as human ingenuity can make it. In this note, we propose the construction of a storage medium with similar properties. The basic idea is to use redundancy and scattering techniques to replicate data across a large set of machines (such as the Internet), and add anonymity mechanisms to drive up the cost of selective service denial attacks. The detailed design of this service is an interesting scientific problem, and is not merely academic: the service may be vital in safeguarding individual rights against new threats posed by the spread of electronic publishing},
  15848         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.1952\&rep=rep1\&type=pdf},
  15849         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/eternity.pdf},
  15850         www_section = unsorted,
  15851         author = {Ross Anderson}
  15852 }
  15853 @conference {onion-routing:ih96,
  15854         title = {Hiding Routing Information},
  15855         booktitle = {Proceedings of Information Hiding: First International Workshop},
  15856         year = {1996},
  15857         month = {May},
  15858         pages = {137--150},
  15859         publisher = {Springer-Verlag, LNCS 1174},
  15860         organization = {Springer-Verlag, LNCS 1174},
  15861         abstract = {This paper describes an architecture, Onion Routing, that limits a network's vulnerability to traffic analysis. The architecture provides anonymous socket connections by means of proxy servers. It provides real-time, bi-directional, anonymous communication for any protocol that can be adapted to use a proxy service. Specifically, the architecture provides for bi-directional communication even though no-one but the initiator's proxy server knows anything but previous and next hops in the communication chain. This implies that neither the respondent nor his proxy server nor any external observer need know the identity of the initiator or his proxy server. A prototype of Onion Routing has been implemented. This prototype works with HTTP (World Wide Web) proxies. In addition, an analogous proxy for TELNET has been implemented. Proxies for FTP and SMTP are under development},
  15862         www_section = {communication chain, onion routing, traffic analysis},
  15863         isbn = {3-540-61996-8},
  15864         url = {http://portal.acm.org/citation.cfm?id=731526},
  15865         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IH-1996.pdf},
  15866         author = {David Goldschlag and Michael Reed and Paul Syverson},
  15867         editor = {Ross Anderson}
  15868 }
  15869 @conference {Fargier:1996:MCS:1892875.1892901,
  15870         title = {Mixed constraint satisfaction: a framework for decision problems under incomplete knowledge},
  15871         booktitle = {AAAI'96--Proceedings of the 13th National Conference on Artificial Intelligence},
  15872         series = {AAAI'96},
  15873         year = {1996},
  15874         month = aug,
  15875         pages = {175--180},
  15876         publisher = {AAAI Press},
  15877         organization = {AAAI Press},
  15878         address = {Portland, OR, United States},
  15879         abstract = {Constraint satisfaction is a powerful tool for representing and solving decision problems with complete knowledge about the world. We extend the CSP framework so as to represent decision problems under incomplete knowledge. The basis of the extension consists in a distinction between controllable and uncontrollable variables -- hence the terminology "mixed CSP" -- and a "solution" gives actually a conditional decision. We study the complexity of deciding the consistency of a mixed CSP. As the problem is generally intractable, we propose an algorithm for finding an approximate solution},
  15880         www_section = {algorithms, constraint satisfaction, decision problem, framework, incomplete knowledge, mixed CSP},
  15881         isbn = {0-262-51091-X},
  15882         url = {http://dl.acm.org/citation.cfm?id=1892875.1892901},
  15883         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAAI\%2796\%20-\%20Mixed\%20constraint\%20satisfaction.pdf},
  15884         author = {Fargier, H{\'e}l{\`e}ne and Lang, J{\'e}r{\^o}me and Schiex, Thomas}
  15885 }
  15886 @conference {Gulcu96mixingemail,
  15887         title = {Mixing email with babel},
  15888         booktitle = {Symposium on Network and Distributed System Security},
  15889         year = {1996},
  15890         pages = {2--16},
  15891         abstract = {Increasingly large numbers of people communicate today via electronic means such as email or news forums. One of the basic properties of the current electronic communication means is the identification of the end-points. However, at times it is desirable or even critical to hide the identity and/or whereabouts of the end-points (e.g., human users) involved. This paper discusses the goals and desired properties of anonymous email in general and introduces the design and salient features of Babel anonymous remailer. Babel allows email users to converse electronically while remaining anonymous with respect to each other and to other-- even hostile--parties. A range of attacks and corresponding countermeasures is considered. An attempt is made to formalize and quantify certain dimensions of anonymity and untraceable communication},
  15892         url = {http://eprints.kfupm.edu.sa/50994/1/50994.pdf},
  15893         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/babel.pdf},
  15894         www_section = unsorted,
  15895         author = {Ceki Gulcu and Gene Tsudik}
  15896 }
  15897 @article {remailer-history,
  15898         title = {Prospects for Remailers},
  15899         journal = {First Monday},
  15900         volume = {1},
  15901         number = {2},
  15902         year = {1996},
  15903         month = {August},
  15904         abstract = {Remailers have permitted Internet users to take advantage of the medium as a means to communicate with others globally on sensitive issues while maintaining a high degree of privacy. Recent events have clearly indicated that privacy is increasingly at risk on the global networks. Individual efforts have, so far, worked well in maintaining for most Internet users a modicum of anonymity. With the growth of increasingly sophisticated techniques to defeat anonymity, there will be a need for both standards and policies to continue to make privacy on the Internet a priority},
  15905         url = {http://131.193.153.231/www/issues/issue2/remailers/index.html},
  15906         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Prospects\%20for\%20Remailers.pdf},
  15907         www_section = unsorted,
  15908         author = {Sameer Parekh}
  15909 }
  15910 @booklet {Stemm96reducingpower,
  15911         title = {Reducing Power Consumption of Network Interfaces in Hand-Held Devices (Extended Abstract)},
  15912         year = {1996},
  15913         abstract = {An important issue to be addressed for the next generation of wirelessly-connected hand-held devices is battery longevity. In this paper we examine this issue from the point of view of the Network Interface (NI). In particular, we measure the power usage of two PDAs, the Apple Newton Messagepad and Sony Magic Link, and four NIs, the Metricom Ricochet Wireless Modem, the AT\&T Wavelan operating at 915 MHz and 2.4 GHz, and the IBM Infrared Wireless LAN Adapter. These measurements clearly indicate that the power drained by the network interface constitutes a large fraction of the total power used by the PDA. We also conduct trace-driven simulation experiments and show that by using applicationspecific policies it is possible to },
  15914         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.8384},
  15915         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.39.8384.pdf},
  15916         www_section = unsorted,
  15918         author = {Mark Stemm and Paul Gauthier and Daishi Harada and Katz, Randy H.}
  15919 }
  15920 @conference {672869,
  15921         title = {Balanced Distributed Search Trees Do Not Exist},
  15922         booktitle = {WADS '95: Proceedings of the 4th International Workshop on Algorithms and Data Structures},
  15923         year = {1995},
  15924         pages = {50--61},
  15925         publisher = {Springer-Verlag},
  15926         organization = {Springer-Verlag},
  15927         address = {London, UK},
  15928         abstract = {This paper is a first step towards an understanding of the inherent limitations of distributed data structures. We propose a model of distributed search trees that is based on few natural assumptions. We prove that any class of trees within our model satisfies a lower bound of $\Omega(\sqrt{m})$ on the worst case height of distributed search trees for m keys. That is, unlike in the single site case, balance in the sense that the tree height satisfies a logarithmic upper bound cannot be achieved. This is true although each node is allowed to have arbitrary degree (note that in this case, the height of a single site search tree is trivially bounded by one). By proposing a method that generates trees of height $O(\sqrt{m})$, we show the bound to be tight},
  15929         isbn = {3-540-60220-8},
  15930         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4081},
  15931         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.34.4081.pdf},
  15932         www_section = unsorted,
  15933         author = {Kr{\"o}ll, Brigitte and Widmayer, Peter}
  15934 }
  15935 @article {224068,
  15936         title = {Exploiting weak connectivity for mobile file access},
  15937         journal = {SIGOPS Oper. Syst. Rev},
  15938         volume = {29},
  15939         number = {5},
  15940         year = {1995},
  15941         pages = {143--155},
  15942         publisher = {ACM},
  15943         address = {New York, NY, USA},
  15944         issn = {0163-5980},
  15945         doi = {10.1145/224057.224068},
  15946         url = {http://portal.acm.org/citation.cfm?id=224068$\#$},
  15947         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/s15.pdf},
  15948         www_section = unsorted,
  15949         author = {Lily B. Mummert and Maria Ebling and Satyanarayanan, Mahadev}
  15950 }
  15951 @booklet {Ramanathan95thefinal,
  15952         title = {The final frontier: Embedding networked sensors in the soil},
  15953         year = {1995},
  15954         publisher = {Lecture Notes in Computer Science},
  15955         abstract = {This paper presents the first systematic design of a robust sensing system suited for the challenges presented by soil environments. We describe three soil deployments we have undertaken: in Bangladesh, and in California at the James Reserve and in the San Joaquin River basin. We discuss our experiences and lessons learned in deploying soil sensors. We present data from each deployment and evaluate our techniques for improving the information yield from these systems. Our most notable results include the following: in-situ calibration techniques to postpone labor-intensive and soil disruptive calibration events developed at the James Reserve; achieving a 91 \% network yield from a Mica2 wireless sensing system without end-to-end reliability in Bangladesh; and the javelin, a new platform that facilitates the deployment, replacement and in-situ calibration of soil sensors, deployed in the San Joaquin River basin. Our techniques to increase information yield have already led to scientifically promising results, including previously unexpected diurnal cycles in various soil chemistry parameters across several deployments },
  15956         www_section = {sensor networks, wireless sensor network},
  15957         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.7766},
  15958         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.120.7766.pdf},
  15959         author = {Nithya Ramanathan and Tom Schoellhammer and Deborah Estrin and Mark Hansen and Tom Harmon and Eddie Kohler and Mani Srivastava}
  15960 }
  15961 @conference {cooper,
  15962         title = {Preserving Privacy in a Network of Mobile Computers},
  15963         booktitle = {Proceedings of the 1995 IEEE Symposium on Security and Privacy},
  15964         year = {1995},
  15965         month = may,
  15966         publisher = {IEEE Computer Society},
  15967         organization = {IEEE Computer Society},
  15968         abstract = {Even as wireless networks create the potential for access to information from mobile platforms, they pose a problem for privacy. In order to retrieve messages, users must periodically poll the network. The information that the user must give to the network could potentially be used to track that user. However, the movements of the user can also be used to hide the user's location if the protocols for sending and retrieving messages are carefully designed. We have developed a replicated memory service which allows users to read from memory without revealing which memory locations they are reading. Unlike previous protocols, our protocol is efficient in its use of computation and bandwidth. We show how this protocol can be used in conjunction with existing privacy preserving protocols to allow a user of a mobile computer to maintain privacy despite active attacks},
  15969         url = {http://portal.acm.org/citation.cfm?id=882491.884247},
  15970         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cooper.pdf},
  15971         www_section = unsorted,
  15972         author = {David A. Cooper and Kenneth P. Birman}
  15973 }
  15974 @conference {pir,
  15975         title = {Private Information Retrieval},
  15976         booktitle = {Proceedings of the IEEE Symposium on Foundations of Computer Science},
  15977         year = {1995},
  15978         pages = {41--50},
  15979         publisher = {ACM  New York, NY, USA},
  15980         organization = {ACM  New York, NY, USA},
  15981         abstract = {Publicly accessible databases are an indispensable resource for retrieving up-to-date information. But they also pose a significant risk to the privacy of the user, since a curious database operator can follow the user's queries and infer what the user is after. Indeed, in cases where the users' intentions are to be kept secret, users are often cautious about accessing the database. It can be shown that when accessing a single database, to completely guarantee the privacy of the user, the whole database should be down-loaded; namely n bits should be communicated (where n is the number of bits in the database). In this work, we investigate whether by replicating the database, more efficient solutions to the private retrieval problem can be obtained. We describe schemes that enable a user to access k replicated copies of a database (k >= 2) and privately retrieve information stored in the database. This means that each individual server (holding a replicated copy of the database) gets no information on the identity of the item retrieved by the user. Our schemes use the replication to gain substantial saving. In particular, we present a two-server scheme with communication complexity O(n^{1/3})},
  15982         doi = {http://doi.acm.org/10.1145/293347.293350},
  15983         url = {http://portal.acm.org/citation.cfm?id=293347.293350},
  15984         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pir.pdf},
  15985         www_section = unsorted,
  15986         author = {Benny Chor and Oded Goldreich and Eyal Kushilevitz and Madhu Sudan}
  15987 }
  15988 @conference {SK,
  15989         title = {Receipt-Free MIX-Type Voting Scheme--A Practical Solution to the Implementation of a Voting Booth},
  15990         booktitle = {Proceedings of EUROCRYPT 1995},
  15991         year = {1995},
  15992         publisher = {Springer-Verlag},
  15993         organization = {Springer-Verlag},
  15994         abstract = {We present a receipt-free voting scheme based on a mix-type anonymous channel [Cha81, PIK93]. The receipt-freeness property [BT94] enables voters to hide how they have voted even from a powerful adversary who is trying to coerce them. The work of [BT94] gave the first solution using a voting booth, which is a hardware assumption not unlike that in current physical elections. In our proposed scheme, we reduce the physical assumptions required to obtain receipt-freeness. Our sole physical assumption is the existence of a private channel through which the center can send the voter a message without fear of eavesdropping},
  15995         isbn = {978-3-540-59409-3},
  15996         doi = {10.1007/3-540-49264-X},
  15997         url = {http://www.springerlink.com/content/jhf7ccxn2fj2gfum/},
  15998         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SK.pdf},
  15999         www_section = unsorted,
  16000         author = {Joe Kilian and Kazue Sako}
  16001 }
  16002 @booklet {Demers94thebayou,
  16003         title = {The Bayou Architecture: Support for Data Sharing among Mobile Users},
  16004         year = {1994},
  16005         abstract = {The Bayou System is a platform of replicated, highly-available, variable-consistency, mobile databases on which to build collaborative applications. This paper presents the preliminary system architecture along with the design goals that influenced it. We take a fresh, bottom-up and critical look at the requirements of mobile computing applications and carefully pull together both new and existing techniques into an overall architecture that meets these requirements. Our emphasis is on supporting application-specific conflict detection and resolution and on providing application controlled inconsistency},
  16006         www_section = {reliability, reputation},
  16007         doi = {10.1109/WMCSA.1994.37},
  16008         url = {http://portal.acm.org/citation.cfm?id=1440028},
  16009         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.8955.pdf},
  16011         author = {Alan Demers and Karin Petersen and Mike Spreitzer and Douglas Terry and Marvin Theimer and Brent Welch}
  16012 }
  16013 @conference {1267093,
  16014         title = {File system design for an NFS file server appliance},
  16015         booktitle = {WTEC'94: Proceedings of the USENIX Winter 1994 Technical Conference on USENIX Winter 1994 Technical Conference},
  16016         year = {1994},
  16017         pages = {19--19},
  16018         publisher = {USENIX Association},
  16019         organization = {USENIX Association},
  16020         address = {Berkeley, CA, USA},
  16021         abstract = {Network Appliance Corporation recently began shipping a new kind of network server called an NFS file server appliance, which is a dedicated server whose sole function is to provide NFS file service. The file system requirements for an NFS appliance are different from those for a general-purpose UNIX system, both because an NFS appliance must be optimized for network file access and because an appliance must be easy to use.
  16022 
  16023 This paper describes WAFL (Write Anywhere File Layout), which is a file system designed specifically to work in an NFS appliance. The primary focus is on the algorithms and data structures that WAFL uses to implement Snapshots, which are read-only clones of the active file system. WAFL uses a copy-on-write technique to minimize the disk space that Snapshots consume. This paper also describes how WAFL uses Snapshots to eliminate the need for file system consistency checking after an unclean shutdown},
  16024         url = {http://portal.acm.org/citation.cfm?id=1267093$\#$},
  16025         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.3691.pdf},
  16026         www_section = unsorted,
  16027         author = {Hitz, Dave and Lau, James and Malcolm, Michael}
  16028 }
  16029 @conference {Manber94findingsimilar,
  16030         title = {Finding Similar Files in a Large File System},
  16031         booktitle = {USENIX WINTER 1994 TECHNICAL CONFERENCE},
  16032         year = {1994},
  16033         pages = {1--10},
  16034         abstract = {We present a tool, called sif, for finding all similar files in a large file system. Files are considered similar if they have a significant number of common pieces, even if they are very different otherwise. For example, one file may be contained, possibly with some changes, in another file, or a file may be a reorganization of another file. The running time for finding all groups of similar files, even for as little as 25\% similarity, is on the order of 500MB to 1GB an hour. The amount of similarity and several other customized parameters can be determined by the user at a post-processing stage, which is very fast. Sif can also be used to very quickly identify all similar files to a query file using a preprocessed index. Application of sif can be found in file management, information collecting (to remove duplicates), program reuse, file synchronization, data compression, and maybe even plagiarism detection},
  16035         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.3222},
  16036         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3222.pdf},
  16037         www_section = unsorted,
  16038         author = {Udi Manber}
  16039 }
  16040 @booklet {898770,
  16041         title = {Libckpt: Transparent Checkpointing under Unix},
  16042         year = {1994},
  16043         publisher = {University of Tennessee},
  16044         address = {Knoxville, TN, USA},
  16045         abstract = {Checkpointing is a simple technique for rollback recovery: the state of an executing program is periodically saved to a disk file from which it can be recovered after a failure. While recent research has developed a collection of powerful techniques for minimizing the overhead of writing checkpoint files, checkpointing remains unavailable to most application developers. In this paper we describe libckpt, a portable checkpointing tool for Unix that implements all applicable performance optimizations which are reported in the literature. While libckpt can be used in a mode which is almost totally transparent to the programmer, it also supports the incorporation of user directives into the creation of checkpoints. This user-directed checkpointing is an innovation which is unique to our work},
  16046         www_section = {checkpointing, performance analysis},
  16047         url = {http://portal.acm.org/citation.cfm?id=898770$\#$},
  16048         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.55.257.pdf},
  16049         author = {James S. Plank and Beck, Micah and Kingsley, Gerry and Li, Kai}
  16050 }
  16051 @article {1993_0,
  16052         title = {Allocative Efficiency of Markets with Zero-Intelligence Traders: Market as a Partial Substitute for Individual Rationality},
  16053         journal = {Journal of Political Economy},
  16054         volume = {101},
  16055         year = {1993},
  16056         month = feb,
  16057         pages = {119--137},
  16058         abstract = {We report market experiments in which human traders are replaced by "zero-intelligence" programs that submit random bids and offers. Imposing a budget constraint (i.e., not permitting traders to sell below their costs or buy above their values) is sufficient to raise the allocative efficiency of these auctions close to 100 percent. Allocative efficiency of a double auction derives largely from its structure, independent of traders' motivation, intelligence, or learning. Adam Smith's invisible hand may be more powerful than some may have thought; it can generate aggregate rationality not only from individual rationality but also from individual irrationality},
  16059         www_section = {allocative efficiency, double auction, market, zero-intelligence trader},
  16060         url = {http://www.jstor.org/stable/2138676},
  16061         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/JPE\%20\%281993\%29\%20-\%20Gode\%20\%26\%20Sunder\%20-\%20Allocative\%20Efficiency.pdf},
  16062         author = {Dhananjay K. Gode and Shyam Sunder}
  16063 }
  16064 @conference {rackoff93cryptographic,
  16065         title = {Cryptographic Defense Against Traffic Analysis},
  16066         booktitle = {Proceedings of ACM Symposium on Theory of Computing},
  16067         year = {1993},
  16068         pages = {672--681},
  16069         publisher = {ACM  New York, NY, USA},
  16070         organization = {ACM  New York, NY, USA},
  16071         address = {San Diego, California, United States},
  16072         isbn = {0-89791-591-7},
  16073         doi = {http://doi.acm.org/10.1145/167088.167260},
  16074         url = {http://portal.acm.org/citation.cfm?id=167088.167260},
  16075         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rackoff93cryptographic.pdf},
  16076         www_section = unsorted,
  16077         author = {Charles Rackoff and Daniel R. Simon}
  16078 }
  16079 @conference {PIK,
  16080         title = {Efficient anonymous channel and all/nothing election scheme},
  16081         booktitle = {Proceedings of EUROCRYPT 1993},
  16082         year = {1993},
  16083         pages = {248--259},
  16084         publisher = {Springer-Verlag, LNCS 765},
  16085         organization = {Springer-Verlag, LNCS 765},
  16086         address = {Lofthus, Norway},
  16087         abstract = {The contributions of this paper are twofold. First, we present an efficient computationally secure anonymous channel which has no problem of ciphertext length expansion. The length is irrelevant to the number of MIXes (control centers). It improves the efficiency of Chaum's election scheme based on the MIX net automatically. Second, we show an election scheme which satisfies fairness. That is, if some vote is disrupted, no one obtains any information about all the other votes. Each voter sends O(nk) bits so that the probability of the fairness is 1-2^-k, where n is the bit length of the ciphertext},
  16088         isbn = {3-540-57600-2},
  16089         url = {http://portal.acm.org/citation.cfm?id=188307.188351},
  16090         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mix.pdf},
  16091         www_section = unsorted,
  16092         author = {Choonsik Park and Kazutomo Itoh and Kaoru Kurosawa}
  16093 }
  16094 @book {1993_1,
  16095         title = {Elliptic Curve Public Key Cryptosystems},
  16096         series = {The Springer International Series in Engineering and Computer Science},
  16097         volume = {234},
  16098         year = {1993},
  16099         pages = {0--144},
  16100         publisher = {Springer},
  16101         organization = {Springer},
  16102         abstract = {Elliptic curves have been intensively studied in algebraic geometry and number theory. In recent years they have been used in devising efficient algorithms for factoring integers and primality proving, and in the construction of public key cryptosystems.
  16103 Elliptic Curve Public Key Cryptosystems provides an up-to-date and self-contained treatment of elliptic curve-based public key cryptology. Elliptic curve cryptosystems potentially provide equivalent security to the existing public key schemes, but with shorter key lengths. Having short key lengths means smaller bandwidth and memory requirements and can be a crucial factor in some applications, for example the design of smart card systems. The book examines various issues which arise in the secure and efficient implementation of elliptic curve systems.
  16104 Elliptic Curve Public Key Cryptosystems is a valuable reference resource for researchers in academia, government and industry who are concerned with issues of data security. Because of the comprehensive treatment, the book is also suitable for use as a text for advanced courses on the subject},
  16105         www_section = {algebraic geometry, elliptic curve cryptography, number theory, public key cryptosystem},
  16106         isbn = {978-0-7923-9368-9},
  16107         url = {http://books.google.com/books/about/Elliptic_curve_public_key_cryptosystems.html?id=bIb54ShKS68C},
  16108         author = {Alfred J. Menezes}
  16109 }
  16110 @booklet {Liedtke93apersistent,
  16111         title = {A Persistent System in Real Use--Experiences of the First 13 Years},
  16112         year = {1993},
  16113         abstract = {Eumel and its advanced successor L3 are operating systems built by GMD which have been used, for 13 years and 4 years respectively, as production systems in business and education. More than 2000 Eumel systems and 500 L3 systems have been shipped since 1979 and 1988. Both systems rely heavily on the paradigm of persistence (including fault-surviving persistence). Both data and processes, in principle all objects are persistent, files are implemented by means of persistent objects (not vice versa) etc. In addition to the principles and mechanisms of Eumel/L3, general and specific experiences are described: these relate to the design, implementation and maintenance of the systems over the last 13 years. For general purpose timesharing systems the idea is powerful and elegant, it can be efficiently implemented, but making a system really usable is hard work},
  16114         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.53.7112},
  16115         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.53.7112.pdf},
  16116         www_section = unsorted,
  16117         author = {Jochen Liedtke}
  16118 }
  16119 @inproceedings{627372,
  16120         author = {C. Beounes and M. Aguera and J. Arlat and S. Bachmann and C. Bourdeau and J. -. Doucet and K. Kanoun and J. -. Laprie and S. Metge and J. Moreira de Souza and D. Powell and P. Spiesser},
  16121         booktitle = {Proceedings of FTCS-23 The Twenty-Third International Symposium on Fault-Tolerant Computing},
  16122         title = {SURF-2: A program for dependability evaluation of complex hardware and software systems},
  16123         year = {1993},
  16126         pages = {668--673},
  16127         abstract = {SURF-2, a software tool for evaluating system dependability, is described. It is especially designed for an evaluation-based system design approach in which multiple design solutions need to be compared from the dependability viewpoint. System behavior may be modeled either by Markov chains or by generalized stochastic Petri nets. The tool supports the evaluation of different measures of dependability, including pointwise measures, asymptotic measures, mean sojourn times and, by superposing a reward structure on the behavior model, reward measures such as expected performance or cost},
  16128         www_section = {software reliability, system behaviour, SURF-2, dependability evaluation, complex hardware and software systems, software tool, system dependability, evaluation-based system design approach, multiple design solutions, Markov chains, generalized stochastic Petri nets, measures of dependability, pointwise measures, asymptotic measures, mean sojourn times, reward structure, reward measures, performance, Hardware, Software systems, Stochastic systems, Petri nets, Software tools, Process design, Stochastic processes, Humans, Costs, Performance evaluation},
  16129         doi = {10.1109/FTCS.1993.627372},
  16130         ISSN = {0731-3071},
  16131         isbn = {0-8186-3680-7},
  16132         url = {https://ieeexplore.ieee.org/document/627372/authors#authors},
  16133         month = jun,
}
  16134 @conference {DBLP:conf/eurocrypt/ChaumP92,
  16135         title = {Transferred Cash Grows in Size},
  16136         booktitle = {EUROCRYPT'92 Workshop on the Theory and Application of Cryptographic Techniques},
  16137         series = {Lecture Notes in Computer Science},
  16138         volume = {658},
  16139         year = {1992},
  16140         month = may,
  16141         pages = {390--407},
  16142         publisher = {Springer},
  16143         organization = {Springer},
  16144         address = {Balatonf{\"u}red, Hungary},
  16145         abstract = {All known methods for transferring electronic money have the disadvantages that the number of bits needed to represent the money after each payment increases, and that a payer can recognize his money if he sees it later in the chain of payments (forward traceability). This paper shows that it is impossible to construct an electronic money system providing transferability without the property that the money grows when transferred. Furthermore it is argued that an unlimited powerful user can always recognize his money later. Finally, the lower bounds on the size of transferred electronic money are discussed in terms of secret sharing schemes},
  16146         www_section = {electronic money, forward traceability, secret sharing, transfer},
  16147         isbn = {3-540-56413-6},
  16148         doi = {10.1007/3-540-47555-9_32},
  16149         url = {https://doi.org/10.1007/3-540-47555-9_32},
  16150         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2792_-_Chaun_\%26_Pedersen_-_Transferred_cash_grows_in_size.pdf},
  16151         author = {David Chaum and Torben P. Pedersen}
  16152 }
  16153 @article {Yokoo91distributedconstraint,
  16154         title = {Distributed Constraint Optimization as a Formal Model of Partially Adversarial Cooperation},
  16155         number = {CSE-TR-101-9},
  16156         year = {1991},
  16157         institution = {University of Michigan},
  16158         type = {Tech report},
  16159         address = {Ann Arbor, MI, United States},
  16160         abstract = {In this paper, we argue that partially adversarial and partially cooperative (PARC) problems in distributed artificial intelligence can be mapped into a formalism called distributed constraint optimization problems (DCOPs), which generalize distributed constraint satisfaction problems [Yokoo, et al. 90] by introducing weak constraints (preferences). We discuss several solution criteria for DCOP and clarify the relation between these criteria and different levels of agent rationality [Rosenschein and Genesereth 85], and show the algorithms for solving DCOPs in which agents incrementally exchange only necessary information to converge on a mutually satisfiable solution},
  16161         www_section = {artificial intelligence, DCOP, PARC, partially adversarial cooperation},
  16162         journal = {unknown},
  16163         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20report\%20-\%20DCOP\%20as\%20a\%20formal\%20model\%20of\%20PARC.pdf},
  16164         url = {https://bibliography.gnunet.org},
  16165         author = {Makoto Yokoo and Edmund H. Durfee}
  16166 }
  16167 @conference {Deswarte91intrusiontolerance,
  16168         title = {Intrusion Tolerance in Distributed Computing Systems},
  16169         booktitle = {Proceedings of the IEEE Symposium on Research in Security and Privacy},
  16170         year = {1991},
  16171         pages = {110--121},
  16172         abstract = {An intrusion-tolerant distributed system is a system which is designed so that any intrusion into a part of the system will not endanger confidentiality, integrity and availability. This approach is suitable for distributed systems, because distribution enables isolation of elements so that an intrusion gives physical access to only a part of the system. By intrusion, we mean not only computer break-ins by non-registered people, but also attempts by registered users to exceed or to abuse their privileges. In particular, possible malice of security administrators is taken into account. This paper describes how some functions of distributed systems can be designed to tolerate intrusions, in particular security functions such as user authentication and authorization, and application functions such as file management},
  16173         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.56.9968},
  16174         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.9968.pdf},
  16175         www_section = unsorted,
  16176         author = {Yves Deswarte and Laurent Blain and Jean-charles Fabre}
  16177 }
  16178 @conference {ISDN-mixes,
  16179         title = {ISDN-mixes: Untraceable communication with very small bandwidth overhead},
  16180         booktitle = {Proceedings of the GI/ITG Conference on Communication in Distributed Systems},
  16181         year = {1991},
  16182         month = feb,
  16183         pages = {451--463},
  16184         publisher = {Springer-Verlag  London, UK},
  16185         organization = {Springer-Verlag  London, UK},
  16186         abstract = {Untraceable communication for services like telephony is often considered infeasible in the near future because of bandwidth limitations. We present a technique, called ISDN-MIXes, which shows that this is not the case. As little changes as possible are made to the narrowband-ISDN planned by the PTTs. In particular, we assume the same subscriber lines with the same bit rate, and the same long-distance network between local exchanges, and we offer the same services. ISDN-MIXes are a combination of a new variant of CHAUM's MIXes, dummy traffic on the subscriber lines (where this needs no additional bandwidth), and broadcast of incoming-call messages in the subscriber-area},
  16187         isbn = {3-540-53721-X},
  16188         url = {http://portal.acm.org/citation.cfm?id=645662.664536},
  16189         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.43.4892.pdf},
  16190         www_section = unsorted,
  16191         author = {Andreas Pfitzmann and Birgit Pfitzmann and Michael Waidner}
  16192 }
  16193 @conference {Waidner:1990:DCD:111563.111630,
  16194         title = {The dining cryptographers in the disco: unconditional sender and recipient untraceability with computationally secure serviceability},
  16195         booktitle = {EUROCRYPT'89--Proceedings of the workshop on the theory and application of cryptographic techniques on Advances in cryptology},
  16196         series = {EUROCRYPT '89},
  16197         year = {1990},
  16198         month = apr,
  16199         pages = {0--690},
  16200         publisher = {Springer-Verlag New York, Inc},
  16201         organization = {Springer-Verlag New York, Inc},
  16202         address = {Houthalen, Belgium},
  16203         abstract = {In Journal of Cryptology 1/1 (1988) 65-75 (= [Chau_88]), David Chaum describes a beautiful technique, the DC-net, which should allow participants to send and receive messages anonymously in an arbitrary network. The untraceability of the senders is proved to be unconditional, but that of the recipients implicitly
  16204 assumes a reliable broadcast network. This assumption is unrealistic in some networks, but it can be removed completely by using the fail-stop key generation schemes by Waidner (these proceedings, = [Waid_89]). In both cases, however, each participant can untraceably and permanently disrupt the entire DC-net.
  16205 We present a protocol which guarantees unconditional untraceability, the original goal of the DC-net, on the inseparability assumption (i.e. the attacker must be unable to prevent honest participants from communicating, which is considerably less than reliable broadcast), and computationally secure serviceability: Computationally restricted disrupters can be identified and removed from the DC-net.
  16206 On the one hand, our solution is based on the lovely idea by David Chaum [Chau_88 {\textsection} 2.5] of setting traps for disrupters. He suggests a scheme to guarantee unconditional untraceability and computationally secure serviceability, too, but on the reliable broadcast assumption. The same scheme seems to be used by Bos and den Boer (these proceedings, = [BoBo_89]). We show that this scheme needs some changes and refinements before being secure, even on the reliable broadcast assumption.
  16207 On the other hand, our solution is based on the idea of digital signatures whose forgery by an unexpectedly powerful attacker is provable, which might be of independent interest. We propose such a (one-time) signature scheme based on claw-free permutation pairs; the forgery of signatures is equivalent to finding claws, thus in a special case to the factoring problem. In particular, with such signatures we can, for the first time, realize fail-stop Byzantine Agreement, and also adaptive Byzantine Agreement, i.e. Byzantine Agreement which can only be disrupted by an attacker who controls at least a third of all participants and who can forge signatures.
  16208 We also sketch applications of these signatures to a payment system, solving disputes about shared secrets, and signatures which cannot be shown round},
  16209         www_section = {anonymity, arbitrary network, cryptology, DC-net},
  16210         isbn = {3-540-53433-4},
  16211         url = {http://dl.acm.org/citation.cfm?id=111563.111630},
  16212         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2789\%20-\%20Waidner\%26Pfitzmann\%20-\%20The\%20dining\%20cryptographers\%20in\%20the\%20disco\%20.pdf},
  16213         author = {Michael Waidner and Birgit Pfitzmann}
  16214 }
  16215 @article {78977,
  16216         title = {Skip lists: a probabilistic alternative to balanced trees},
  16217         journal = {Commun. ACM},
  16218         volume = {33},
  16219         number = {6},
  16220         year = {1990},
  16221         pages = {668--676},
  16222         publisher = {ACM},
  16223         address = {New York, NY, USA},
  16224         abstract = {Skip lists are data structures that use probabilistic balancing rather than strictly enforced balancing. As a result, the algorithms for insertion and deletion in skip lists are much simpler and significantly faster than equivalent algorithms for balanced trees},
  16225         www_section = {data structures, search},
  16226         issn = {0001-0782},
  16227         doi = {10.1145/78973.78977},
  16228         url = {http://portal.acm.org/citation.cfm?id=78977$\#$},
  16229         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.85.9211.pdf},
  16230         author = {Pugh, William}
  16231 }
  16232 @conference {1988_0,
  16233         title = {Completeness Theorems for Non-cryptographic Fault-tolerant Distributed Computation},
  16234         booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing},
  16235         year = {1988},
  16236         publisher = {ACM},
  16237         organization = {ACM},
  16238         address = {New York, NY, USA},
  16239         abstract = {Every function of n inputs can be efficiently computed by a complete network of n processors in such a way that: If no faults occur, no set of size t < n/2 of players gets any additional information (other than the function value), Even if Byzantine faults are allowed, no set of size t < n/3 can either disrupt the computation or get additional information. Furthermore, the above bounds on t are tight!},
  16240         isbn = {0-89791-264-0},
  16241         doi = {10.1145/62212.62213},
  16242         url = {http://doi.acm.org/10.1145/62212.62213},
  16243         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CompletelenessTheorems1988Ben-Or.pdf},
  16244         www_section = unsorted,
  16245         author = {Ben-Or, Michael and Goldwasser, Shafi and Wigderson, Avi}
  16246 }
  16247 @article {chaum-dc,
  16248         title = {The Dining Cryptographers Problem: Unconditional Sender and Recipient Untraceability},
  16249         journal = {Journal of Cryptology},
  16250         volume = {1},
  16251         year = {1988},
  16252         pages = {65--75},
  16253         abstract = {Keeping confidential who sends which messages, in a world where any physical transmission can be traced to its origin, seems impossible. The solution presented here is unconditionally or cryptographically secure, depending on whether it is based on one-time-use keys or on public keys, respectively. It can be adapted to address efficiently a wide variety of practical considerations},
  16254         www_section = {pseudonym, unconditional security, untraceability},
  16255         issn = {0933-2790},
  16256         url = {http://portal.acm.org/citation.cfm?id=54239},
  16257         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dcnet-jcrypt88.pdf},
  16258         author = {David Chaum}
  16259 }
  16260 @conference {1988_1,
  16261         title = {Founding Cryptography on Oblivious Transfer},
  16262         booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing},
  16263         year = {1988},
  16264         publisher = {ACM},
  16265         organization = {ACM},
  16266         address = {New York, NY, USA},
  16267         abstract = {Suppose your netmail is being erratically censored by Captain Yossarian. Whenever you send a message, he censors each bit of the message with probability 1/2, replacing each censored bit by some reserved character. Well versed in such concepts as redundancy, this is no real problem to you. The question is, can it actually be turned around and used to your advantage? We answer this question strongly in the affirmative. We show that this protocol, more commonly known as oblivious transfer, can be used to simulate a more sophisticated protocol, known as oblivious circuit evaluation([Y]). We also show that with such a communication channel, one can have completely noninteractive zero-knowledge proofs of statements in NP. These results do not use any complexity-theoretic assumptions. We can show that they have applications to a variety of models in which oblivious transfer can be done},
  16268         www_section = {oblivious circuits},
  16269         isbn = {0-89791-264-0},
  16270         doi = {10.1145/62212.62215},
  16271         url = {http://doi.acm.org/10.1145/62212.62215},
  16272         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oblivious_transfer.pdf},
  16273         author = {Kilian, Joe}
  16274 }
  16275 @conference {1987,
  16276         title = {How to Play ANY Mental Game or A Completeness Theorem for Protocols with Honest Majority},
  16277         booktitle = {Proceedings of the Nineteenth Annual ACM Symposium on Theory of Computing},
  16278         year = {1987},
  16279         publisher = {ACM},
  16280         organization = {ACM},
  16281         address = {New York, NY, USA},
  16282         abstract = {We present a polynomial-time algorithm that, given as input the description of a game with incomplete information and any number of players, produces a protocol for playing the game that leaks no partial information, provided the majority of the players is honest. Our algorithm automatically solves all the multi-party protocol problems addressed in complexity-based cryptography during the last 10 years. It actually is a completeness theorem for the class of distributed protocols with honest majority. Such completeness theorem is optimal in the sense that, if the majority of the players is not honest, some protocol problems have no efficient solution [C]},
  16283         isbn = {0-89791-221-7},
  16284         doi = {10.1145/28395.28420},
  16285         url = {http://doi.acm.org/10.1145/28395.28420},
  16286         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PlayMentalGame1987Goldreich.pdf},
  16287         www_section = unsorted,
  16288         author = {Goldreich, O. and Micali, S. and Wigderson, A.}
  16289 }
  16290 @article {37517,
  16291         title = {A simple and efficient implementation of a small database},
  16292         journal = {SIGOPS Oper. Syst. Rev},
  16293         volume = {21},
  16294         number = {5},
  16295         year = {1987},
  16296         pages = {149--154},
  16297         publisher = {ACM},
  16298         address = {New York, NY, USA},
  16299         abstract = {This paper describes a technique for implementing the sort of small databases that frequently occur in the design of operating systems and distributed systems. We take advantage of the existence of very large virtual memories, and quite large real memories, to make the technique feasible. We maintain the database as a strongly typed data structure in virtual memory, record updates incrementally on disk in a log and occasionally make a checkpoint of the entire database. We recover from crashes by restoring the database from an old checkpoint then replaying the log. We use existing packages to convert between strongly typed data objects and their disk representations, and to communicate strongly typed data across the network (using remote procedure calls). Our memory is managed entirely by a general purpose allocator and garbage collector. This scheme has been used to implement a name server for a distributed system. The resulting implementation has the desirable property of being simultaneously simple, efficient and reliable },
  16300         issn = {0163-5980},
  16301         doi = {10.1145/37499.37517},
  16302         url = {http://portal.acm.org/citation.cfm?id=37499.37517$\#$},
  16303         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/024-DatabasesPaper.pdf},
  16304         www_section = unsorted,
  16305         author = {Andrew D. Birrell and Michael B. Jones and Edward P. Wobber}
  16306 }
  16307 @conference {Stumm:1987:SDR:55482.55508,
  16308         title = {Strategies for decentralized resource management},
  16309         booktitle = {SIGCOMM'87. Proceedings of the ACM Workshop on Frontiers in Computer Communications Technology},
  16310         series = {SIGCOMM '87},
  16311         year = {1987},
  16312         month = aug,
  16313         pages = {245--253},
  16314         publisher = {ACM},
  16315         organization = {ACM},
  16316         address = {Stowe, VT, USA},
  16317         abstract = {Decentralized resource management in distributed systems has become more practical with the availability of communication facilities that support multicasting. In this paper we present several example solutions for managing resources in a decentralized fashion, using multicasting facilities. We review the properties of these solutions in terms of scalability, fault tolerance and efficiency. We conclude that decentralized solutions compare favorably to centralized solutions with respect to all three criteria},
  16318         www_section = {decentralized, distributed systems, multicasting},
  16319         isbn = {0-89791-245-4},
  16320         doi = {http://doi.acm.org/10.1145/55482.55508},
  16321         url = {http://doi.acm.org/10.1145/55482.55508},
  16322         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2787\%20-\%20Strategies\%20for\%20decentralized\%20resource\%20management.pdf},
  16323         author = {Stumm, Michael}
  16324 }
  16325 @book {1986,
  16326         title = {Networks Without User Observability {\textemdash} Design Options},
  16327         booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT' 85},
  16328         series = {Lecture Notes in Computer Science},
  16329         volume = {219},
  16330         year = {1986},
  16331         pages = {245--253},
  16332         publisher = {Springer Berlin Heidelberg},
  16333         organization = {Springer Berlin Heidelberg},
  16334         abstract = {In usual communication networks, the network operator or an intruder could easily observe when, how much and with whom the users communicate (traffic analysis), even if the users employ end-to-end encryption. When ISDNs are used for almost everything, this becomes a severe threat. Therefore, we summarize basic concepts to keep the recipient and sender or at least their relationship unobservable, consider some possible implementations and necessary hierarchical extensions, and propose some suitable performance and reliability enhancements},
  16335         isbn = {978-3-540-16468-5},
  16336         doi = {10.1007/3-540-39805-8_29},
  16337         url = {http://dx.doi.org/10.1007/3-540-39805-8_29},
  16338         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetworkWithoutUserObservabiliy1985Pfitzmann.pdf},
  16339         www_section = unsorted,
  16340         author = {Pfitzmann, Andreas and Waidner, Michael},
  16341         editor = {Pichler, Franz}
  16342 }
  16343 @article {15043,
  16344         title = {Revised report on the algorithmic language scheme},
  16345         journal = {SIGPLAN Not},
  16346         volume = {21},
  16347         number = {12},
  16348         year = {1986},
  16349         pages = {37--79},
  16350         publisher = {ACM},
  16351         address = {New York, NY, USA},
  16352         abstract = {The report gives a defining description of the programming language Scheme. Scheme is a statically scoped and properly tail-recursive dialect of the Lisp programming language invented by Guy Lewis Steele Jr. and Gerald Jay Sussman. It was designed to have an exceptionally clear and simple semantics and few different ways to form expressions. A wide variety of programming paradigms, including imperative, functional, and message passing styles, find convenient expression in Scheme. The introduction offers a brief history of the language and of the report. The first three chapters present the fundamental ideas of the language and describe the notational conventions used for describing the language and for writing programs in the language},
  16353         issn = {0362-1340},
  16354         doi = {10.1145/15042.15043},
  16355         url = {http://en.scientificcommons.org/42347723},
  16356         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/r5rs.pdf},
  16357         www_section = unsorted,
  16358         author = {Rees, Jonathan and Clinger, William and Richard Kelsey}
  16359 }
  16360 @conference {Tanenbaum86usingsparse,
  16361         title = {Using Sparse Capabilities in a Distributed Operating System},
  16362         booktitle = {Proceedings of the 6th International Conference on Distributed Computing Systems},
  16363         year = {1986},
  16364         pages = {558--563},
  16365         abstract = {In this paper we discuss a system, Amoeba, that uses capabilities for naming and protecting objects. In contrast to traditional, centralized operating systems, in which capabilities are managed by the operating system kernel, in Amoeba all the capabilities are managed directly by user code. To prevent tampering, the capabilities are protected cryptographically. The paper describes a variety of the issues involved, and gives four different ways of dealing with the access rights},
  16366         url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.49.7998},
  16367         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.3350.pdf},
  16368         www_section = unsorted,
  16369         author = {Andrew Tanenbaum and Sape J. Mullender and Robbert Van Renesse}
  16370 }
  16371 @article {214121,
  16372         title = {Impossibility of distributed consensus with one faulty process},
  16373         journal = {J. ACM},
  16374         volume = {32},
  16375         number = {2},
  16376         year = {1985},
  16377         pages = {374--382},
  16378         publisher = {ACM},
  16379         address = {New York, NY, USA},
  16380         abstract = {The consensus problem involves an asynchronous system of processes, some of which may be unreliable. The problem is for the reliable processes to agree on a binary value. In this paper, it is shown that every protocol for this problem has the possibility of nontermination, even with only one faulty process. By way of contrast, solutions are known for the synchronous case, the {\textquotedblleft}Byzantine Generals{\textquotedblright} problem},
  16381         issn = {0004-5411},
  16382         doi = {10.1145/3149.214121},
  16383         url = {http://portal.acm.org/citation.cfm?id=214121$\#$},
  16384         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pods06_paper01.pdf},
  16385         www_section = unsorted,
  16386         author = {Fischer, Michael J. and Lynch, Nancy A. and Paterson, Michael S.}
  16387 }
  16388 @conference {pfitzmann85,
  16389         title = {Networks Without User Observability -- Design Options},
  16390         booktitle = {Proceedings of EUROCRYPT 1985},
  16391         year = {1985},
  16392         month = apr,
  16393         publisher = {Springer-Verlag New York, Inc},
  16394         organization = {Springer-Verlag New York, Inc},
  16395         address = {Linz, Austria},
  16396         abstract = {In present-day communication networks, the network operator or an intruder could easily observe when, how much and with whom the users communicate (traffic analysis), even if the users employ end-to-end encryption. With the increasing use of ISDNs, this becomes a severe threat.
  16397 Therefore, we summarize basic concepts to keep the recipient and sender or at least their relationship unobservable, consider some possible implementations and necessary hierarchical extensions, and propose some suitable performance and reliability enhancements},
  16398         www_section = {anonymity, dining cryptographers, fault-tolerance, ISDN, mix, ring network, traffic analysis, user observability},
  16399         isbn = {0-387-16468-5},
  16400         url = {http://www.semper.org/sirene/publ/PfWa_86anonyNetze.html},
  16401         author = {Andreas Pfitzmann and Michael Waidner}
  16402 }
  16403 @conference {ElGamal:1985:PKC:19478.19480,
  16404         title = {A Public Key Cryptosystem and a Signature Scheme Based on Discrete Logarithms},
  16405         booktitle = {Proceedings of CRYPTO 84 on Advances in cryptology},
  16406         year = {1985},
  16407         month = jan,
  16408         pages = {10--18},
  16409         publisher = {Springer-Verlag New York, Inc},
  16410         organization = {Springer-Verlag New York, Inc},
  16411         address = {Santa Barbara, California},
  16412         abstract = {A new signature scheme is proposed together with an implementation of the Diffie--Hellman key distribution scheme that achieves a public key cryptosystem. The security of both systems relies on the difficulty of computing discrete logarithms over finite fields},
  16413         www_section = {cryptosystem, discrete logarithms, public key, signature scheme},
  16414         isbn = {0-387-15658-5},
  16415         url = {http://dl.acm.org/citation.cfm?id=19478.19480s},
  16416         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2784\%20-\%20El\%20Gamal\%20-\%20Public\%20Key\%20Cryptosystem.pdf},
  16417         author = {El Gamal, Taher}
  16418 }
  16419 @article {4202,
  16420         title = {RCS---a system for version control},
  16421         journal = {Softw. Pract. Exper},
  16422         volume = {15},
  16423         number = {7},
  16424         year = {1985},
  16425         pages = {637--654},
  16426         publisher = {John Wiley \& Sons, Inc},
  16427         address = {New York, NY, USA},
  16428         abstract = {An important problem in program development and maintenance is version control, i.e., the task of keeping a software system consisting of many versions and configurations well organized. The Revision Control System (RCS) is a software tool that assists with that task. RCS manages revisions of text documents, in particular source programs, documentation, and test data. It automates the storing, retrieval, logging and identification of revisions, and it provides selection mechanisms for composing configurations. This paper introduces basic version control concepts and discusses the practice of version control using RCS. For conserving space, RCS stores deltas, i.e., differences between successive revisions. Several delta storage methods are discussed. Usage statistics show that RCS's delta storage method is space and time efficient. The paper concludes with a detailed survey of version control tools},
  16429         www_section = {version control},
  16430         issn = {0038-0644},
  16431         doi = {10.1002/spe.4380150703},
  16432         url = {http://portal.acm.org/citation.cfm?id=4202$\#$},
  16433         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.3350_0.pdf},
  16434         author = {Tichy, Walter F.}
  16435 }
  16436 @article {chaum85,
  16437         title = {Security without Identification: Transaction Systems to Make Big Brother Obsolete},
  16438         journal = {Communications of the ACM},
  16439         volume = {28},
  16440         number = {10},
  16441         year = {1985},
  16442         month = oct,
  16443         pages = {1030--1044  },
  16444         abstract = {The large-scale automated transaction systems of the near future can be designed to protect the privacy and maintain the security of both individuals and organizations},
  16445         issn = {0001-0782},
  16446         doi = {http://doi.acm.org/10.1145/4372.4373},
  16447         url = {http://portal.acm.org/citation.cfm?id=4373},
  16448         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.48.4680.pdf},
  16449         www_section = unsorted,
  16450         author = {David Chaum}
  16451 }
  16452 @book {538134,
  16453         title = {Capability-Based Computer Systems},
  16454         year = {1984},
  16455         publisher = {Butterworth-Heinemann},
  16456         organization = {Butterworth-Heinemann},
  16457         address = {Newton, MA, USA},
  16458         isbn = {0932376223},
  16459         url = {http://portal.acm.org/citation.cfm?id=538134$\#$},
  16460         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Preface.pdf},
  16461         www_section = unsorted,
  16462         author = {Levy, Henry M.}
  16463 }
  16464 @article {357176,
  16465         title = {The Byzantine Generals Problem},
  16466         journal = {ACM Trans. Program. Lang. Syst},
  16467         volume = {4},
  16468         number = {3},
  16469         year = {1982},
  16470         pages = {382--401},
  16471         publisher = {ACM},
  16472         address = {New York, NY, USA},
  16473         issn = {0164-0925},
  16474         doi = {10.1145/357172.357176},
  16475         url = {http://portal.acm.org/citation.cfm?id=357176$\#$},
  16476         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/byz.pdf},
  16477         www_section = unsorted,
  16478         author = {Lamport, Leslie and Shostak, Robert and Pease, Marshall}
  16479 }
  16480 @conference {1982,
  16481         title = {Protocols for Secure Computations},
  16482         booktitle = {Proceedings of the 23rd Annual Symposium on Foundations of Computer Science},
  16483         year = {1982},
  16484         publisher = {IEEE Computer Society},
  16485         organization = {IEEE Computer Society},
  16486         address = {Washington, DC, USA},
  16487         doi = {10.1109/SFCS.1982.88},
  16488         url = {http://dx.doi.org/10.1109/SFCS.1982.88},
  16489         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ProtocolSecurecomputations1982Yao.pdf},
  16490         www_section = unsorted,
  16491         author = {Yao, Andrew C.}
  16492 }
  16493 @article {chaum-mix,
  16494         title = {Untraceable electronic mail, return addresses, and digital pseudonyms},
  16495         journal = {Communications of the ACM},
  16496         volume = {24},
  16497         number = {2},
  16498         year = {1981},
  16499         month = feb,
  16500         pages = {84--90},
  16501         abstract = {A technique based on public key cryptography is presented that allows an electronic mail system to hide who a participant communicates with as well as the content of the communication--in spite of an unsecured underlying telecommunication system.  The technique does not require a universally trusted authority.  One correspondent can remain anonymous to a second, while allowing the second to respond via an untraceable return address.
  16502 The technique can also be used to form rosters of untraceable digital pseudonyms from selected applications.  Applicants retain the exclusive ability to form digital signatures corresponding to their pseudonyms.  Elections in which any interested party can verify that the ballots have been properly counted are possible if anonymously mailed ballots are signed with pseudonyms from a roster of registered voters.  Another use allows an individual to correspond with a record-keeping organization under a unique pseudonym which appears in a roster of acceptable clients},
  16503         www_section = {digital signature, electronic mail, privacy, pseudonym, public key cryptography, traffic analysis},
  16504         issn = {0001-0782},
  16505         doi = {10.1145/358549.358563},
  16506         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chaum-mix_0.pdf},
  16507         url = {https://bibliography.gnunet.org},
  16508         author = {David Chaum}
  16509 }
  16510 @article {10.1109/SP.1980.10006,
  16511         title = {Protocols for Public Key Cryptosystems},
  16512         journal = {Security and Privacy, IEEE Symposium on},
  16513         year = {1980},
  16514         pages = {0--122},
  16515         publisher = {IEEE Computer Society},
  16516         address = {Los Alamitos, CA, USA},
  16517         abstract = {New cryptographic protocols which take full advantage of the unique properties of public key cryptosystems are now evolving. Several protocols for public key distribution and for digital signatures are briefly compared with each other and with the conventional alternative},
  16518         issn = {1540-7993},
  16519         doi = {10.1109/SP.1980.10006},
  16520         url = {http://www.computer.org/portal/web/csdl/doi/10.1109/SP.1980.10006},
  16521         www_section = unsorted,
  16522         author = {Ralph C. Merkle}
  16523 }
  16524 @conference {1979,
  16525         title = {Compact Encodings of List Structure},
  16526         booktitle = {Compact Encodings of List Structure},
  16527         year = {1979},
  16528         publisher = {ACM, New York, NY, USA},
  16529         organization = {ACM, New York, NY, USA},
  16530         abstract = {List structures provide a general mechanism for representing easily changed structured data, but can introduce inefficiencies in the use of space when fields of uniform size are used to contain pointers to data and to link the structure. Empirically determined regularity can be exploited to provide more space-efficient encodings without losing the flexibility inherent in list structures. The basic scheme is to provide compact pointer fields big enough to accommodate most values that occur in them and to provide {\textquotedblleft}escape{\textquotedblright} mechanisms for exceptional cases. Several examples of encoding designs are presented and evaluated, including two designs currently used in Lisp machines. Alternative escape mechanisms are described, and various questions of cost and implementation are discussed. In order to extrapolate our results to larger systems than those measured, we propose a model for the generation of list pointers and we test the model against data from two programs. We show that according to our model, list structures with compact cdr fields will, as address space grows, continue to be compacted well with a fixed-width small field. Our conclusion is that with a microcodable processor, about a factor of two gain in space efficiency for list structure can be had for little or no cost in processing time},
  16531         doi = {10.1145/357073.357081},
  16532         url = {http://portal.acm.org/citation.cfm?id=357081$\#$collab},
  16533         www_section = unsorted,
  16534         author = {Daniel G. Bobrow and Douglas W. Clark}
  16535 }
  16536 @techreport {padlipky78,
  16537         title = {Limitations of End-to-End Encryption in Secure Computer Networks},
  16538         number = {ESD-TR-78-158},
  16539         year = {1978},
  16540         month = aug,
  16541         institution = {The MITRE Corporation, Bedford, MA; HQ Electronic Systems Division},
  16542         address = {Hanscom AFB, MA},
  16543         www_section = {traffic analysis},
  16544         type = {Technical Report},
  16545         url = {http://stinet.dtic.mil/cgi-bin/GetTRDoc?AD=A059221\&Location=U2\&doc=GetTRDoc.pdf},
  16546         author = {Michael A. Padlipsky and David W. Snow and Paul A. Karger}
  16547 }
  16548 @mastersthesis {karger77,
  16549         title = {Non-Discretionary Access Control for Decentralized Computing Systems},
  16550         number = {MIT/LCS/TR-179},
  16551         year = {1977},
  16552         month = may,
  16553         school = {Laboratory for Computer Science, Massachusetts Institute of Technology},
  16554         type = {S. M. \& E. E. thesis},
  16555         address = {Cambridge, MA},
  16556         abstract = {This thesis examines the issues relating to non-discretionary access controls for decentralized computing systems. Decentralization changes the basic character of a computing system from a set of processes referencing a data base to a set of processes sending and receiving messages. Because messages must be acknowledged, operations that were read-only in a centralized system become read-write operations. As a result, the lattice model of non-discretionary access control, which mediates operations based on read versus read-write considerations, does not allow direct transfer of algorithms from centralized systems to decentralized systems. This thesis develops new mechanisms that comply with the lattice model and provide the necessary functions for effective decentralized computation. Secure protocols at several different levels are presented in the thesis. At the lowest level, a host-to-host protocol is shown that allows communication between hosts with effective internal security controls. Above this level, a host independent naming scheme is presented that allows generic naming of services in a manner consistent with the lattice model. The use of decentralized processing to aid in the downgrading of information is shown in the design of a secure intelligent terminal. Schemes are presented to deal with the decentralized administration of the lattice model, and with the proliferation of access classes as the user community of a decentralized system becomes more diverse. Limitations in the use of end-to-end encryption when used with the lattice model are identified, and a scheme is presented to relax these limitations for broadcast networks. Finally, a scheme is presented for forwarding authentication information between hosts on a network, without transmitting passwords (or their equivalent) over a network},
  16557         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MIT-LCS-TR-179.pdf},
  16558         www_section = unsorted,
  16559         url = {https://bibliography.gnunet.org},
  16560         author = {Paul A. Karger}
  16561 }
  16562 @article {1977,
  16563         title = {Towards a methodology for statistical disclosure control},
  16564         journal = {Statistisk Tidskrift},
  16565         volume = {15},
  16566         year = {1977},
  16567         pages = {429--444},
  16568         www_section = {database_privacy differential_privacy stat},
  16569         url = {https://bibliography.gnunet.org},
  16570         author = {Dalenius, T.}
  16571 }
  16572 @article {1076,
  16573         title = {New directions in cryptography},
  16574         journal = {IEEE Transactions on Information Theory},
  16575         volume = {22},
  16576         year = {1976},
  16577         month = nov,
  16578         pages = {644--654},
  16579         abstract = {Two kinds of contemporary developments in cryptography are examined. Widening applications of teleprocessing have given rise to a need for new types of cryptographic systems, which minimize the need for secure key distribution channels and supply the equivalent of a written signature. This paper suggests ways to solve these currently open problems. It also discusses how the theories of communication and computation are beginning to provide the tools to solve cryptographic problems of long standing},
  16580         www_section = {cryptographic systems, cryptography},
  16581         issn = {0018-9448},
  16582         doi = {10.1109/TIT.1976.1055638},
  16583         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Trans.\%20on\%20Info.\%20-\%20New\%20directions\%20in\%20cryptography.pdf},
  16584         url = {https://bibliography.gnunet.org},
  16585         author = {Whitfield Diffie and Martin E. Hellman}
  16586 }
  16587 @article {1971,
  16588         title = {The Evolution of Reciprocal Altruism},
  16589         journal = {The Quarterly Review of Biology},
  16590         volume = {46},
  16591         year = {1971},
  16592         month = mar,
  16593         pages = {35--57},
  16594         abstract = {A model is presented to account for the natural selection of what is termed reciprocally altruistic behavior. The model shows how selection can operate against the cheater (non-reciprocator) in the system. Three instances of altruistic behavior are discussed, the evolution of which the model can explain: (1) behavior involved in cleaning symbioses; (2) warning cries in birds; and (3) human reciprocal altruism. Regarding human reciprocal altruism, it is shown that the details of the psychological system that regulates this altruism can be explained by the model. Specifically, friendship, dislike, moralistic aggression, gratitude, sympathy, trust, suspicion, trustworthiness, aspects of guilt, and some forms of dishonesty and hypocrisy can be explained as important adaptations to regulate the altruistic system. Each individual human is seen as possessing altruistic and cheating tendencies, the expression of which is sensitive to developmental variables that were selected to set the tendencies at a balance appropriate to the local social and ecological environment},
  16595         www_section = {behavior, evolution, reciprocal altruism},
  16596         url = {http://www.jstor.org/pss/2822435},
  16597         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trivers\%20-\%20The\%20evolution\%20of\%20reciprocal\%20altruism.pdf},
  16598         author = {Robert L. Trivers}
  16599 }
  16600 @article {1970_0,
  16601         title = {An Efficient Heuristic Procedure for Partitioning Graphs},
  16602         journal = {The Bell System Technical Journal},
  16603         volume = {49},
  16604         year = {1970},
  16605         month = jan,
  16606         pages = {291--307},
  16607         abstract = {We consider the problem of partitioning the nodes of a graph with costs on its edges into subsets of given sizes so as to minimize the sum of the costs on all edges cut. This problem arises in several physical situations; for example, in assigning the components of electronic circuits to circuit boards to minimize the number of connections between boards. This paper presents a heuristic method for partitioning arbitrary graphs which is both effective in finding optimal partitions, and fast enough to be practical in solving large problems},
  16608         www_section = {heuristic method, partitioning graphs},
  16609         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kernighan\%20\%26\%20Lin\%20-\%20An\%20Efficient\%20Heuristic\%20Procedure\%20for\%20Partitioning\%20Graphs\%250A.pdf},
  16610         url = {https://bibliography.gnunet.org},
  16611         author = {Brian W. Kernighan and S. Lin}
  16612 }
  16613 @article {1970_1,
  16614         title = {The market for "lemons": Quality uncertainty and the market mechanism},
  16615         journal = {The Quarterly Journal of Economics},
  16616         volume = {84},
  16617         year = {1970},
  16618         month = aug,
  16619         pages = {488--500},
  16620         abstract = {I. Introduction, 488.--II. The model with automobiles as an example, 489.--III. Examples and applications, 492.--IV. Counteracting institutions, 499.--V. Conclusion, 500},
  16621         url = {http://www.jstor.org/stable/1879431},
  16622         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/akerlof.pdf},
  16623         www_section = unsorted,
  16624         author = {George A. Akerlof}
  16625 }
  16626 @article {Bloom70space/timetrade-offs,
  16627         title = {Space/Time Trade-offs in Hash Coding with Allowable Errors},
  16628         journal = {Communications of the ACM},
  16629         volume = {13},
  16630         year = {1970},
  16631         pages = {422--426},
  16632         abstract = {In this paper trade-offs among certain computational factors in hash coding are analyzed. The paradigm problem considered is that of testing a series of messages one-by-one for membership in a given set of messages. Two new hash-coding methods are examined and compared with a particular conventional hash-coding method. The computational factors considered are the size of the hash area (space), the time required to identify a message as a nonmember of the given set (reject time), and an allowable error frequency},
  16633         www_section = {Bloom filter, compression},
  16634         url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.2080\&rep=rep1\&type=pdf},
  16635         author = {Burton H. Bloom}
  16636 }
  16637 @article {1968,
  16638         title = {The Tragedy of the Commons},
  16639         journal = {Science},
  16640         volume = {162},
  16641         year = {1968},
  16642         pages = {1243--1248},
  16643         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Science\%20-\%20Hardin\%20-\%20The\%20Tragedy\%20of\%20the\%20Commons.pdf},
  16644         www_section = unsorted,
  16645         url = {https://bibliography.gnunet.org},
  16646         author = {Garrett Hardin}
  16647 }
  16648 @article {1962,
  16649         title = {Low-density parity-check codes},
  16650         journal = {IRE Transactions on Information Theory},
  16651         volume = {8},
  16652         year = {1962},
  16653         pages = {21--28},
  16654         number = {1},
  16655         abstract = {A low-density parity-check code is a code specified by a parity-check matrix with the following properties: each column contains a small fixed number $j \geq 3$ of 1's and each row contains a small fixed number $k > j$ of 1's. The typical minimum distance of these codes increases linearly with block length for a fixed rate and fixed $j$. When used with maximum likelihood decoding on a sufficiently quiet binary-input symmetric channel, the typical probability of decoding error decreases exponentially with block length for a fixed rate and fixed $j$. A simple but nonoptimum decoding scheme operating directly from the channel a posteriori probabilities is described. Both the equipment complexity and the data-handling capacity in bits per second of this decoder increase approximately linearly with block length. For $j > 3$ and a sufficiently low rate, the probability of error using this decoder on a binary symmetric channel is shown to decrease at least exponentially with a root of the block length. Some experimental results show that the actual probability of decoding error is much smaller than this theoretical bound},
  16656         www_section = {coding theory, low-density parity-check},
  16657         issn = {0096-1000},
  16658         doi = {10.1109/TIT.1962.1057683},
  16659         url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1057683},
  16660         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ldpc.pdf},
  16661         author = {Robert G. Gallager}
  16662 }
  16663 @article {reed60polynomial,
  16664         title = {Polynomial codes over certain finite fields},
  16665         journal = {Journal of the Society for Industrial and Applied Mathematics},
  16666         volume = {8},
  16667         number = {2},
  16668         year = {1960},
  16669         month = jun,
  16670         pages = {300--304},
  16671         www_section = {filing-erasure-coding},
  16672         url = {http://www.jstor.org/pss/2098968},
  16673         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Reed\%20\%26\%20Solomon\%20-\%20Polynomial\%20Codes\%20Over\%20Certain\%20Finite\%20Fields.pdf},
  16674         author = {Irving S. Reed and Gustave Solomon}
  16675 }
  16676 @article {1959,
  16677         title = {On Random Graphs I},
  16678         journal = {Publicationes Mathematicae (Debrecen)},
  16679         volume = {6},
  16680         year = {1959},
  16681         month = jan,
  16682         pages = {290--297},
  16683         www_section = {graphs, random, random graphs},
  16684         url = {https://bibliography.gnunet.org},
  16685         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Erd\%C5\%91s\%20\%26\%20R\%C3\%A9nyi\%20-\%20On\%20Random\%20Graphs.pdf},
  16686         author = {Paul Erd{\H o}s and Alfr{\'e}d R{\'e}nyi}
  16687 }
  16688 @article {368907,
  16689         title = {On programming of arithmetic operations},
  16690         journal = {Commun. ACM},
  16691         volume = {1},
  16692         number = {8},
  16693         year = {1958},
  16694         pages = {3--6},
  16695         publisher = {ACM},
  16696         address = {New York, NY, USA},
  16697         issn = {0001-0782},
  16698         doi = {10.1145/368892.368907},
  16699         url = {http://portal.acm.org/citation.cfm?id=368907$\#$},
  16700         www_section = unsorted,
  16701         author = {Andrey Petrovich Ershov}
  16702 }
  16703 @article {1950,
  16704         title = {Equilibrium points in n-person games},
  16705         journal = {Proceedings of the National Academy of Sciences of the USA},
  16706         volume = {36},
  16707         year = {1950},
  16708         month = jan,
  16709         pages = {48--49},
  16710         abstract = {One may define a concept of an n-person game in which each player has a finite set of pure strategies and in which a definite set of payments to the n players corresponds to each n-tuple of pure strategies, one strategy being taken for each player. For mixed strategies, which are probability distributions over the pure strategies, the pay-off functions are the expectations of the players, thus becoming polylinear forms},
  16711         www_section = {n-person game, strategy},
  16712         doi = {10.1073/pnas.36.1.48},
  16713         url = {https://bibliography.gnunet.org},
  16714         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20-\%20Nash\%20-\%20Equilibrium\%20points\%20in\%20n-person\%20games.pdf},
  16715         author = {John F. Nash Jr.}
  16716 }
  16717 @book {1944,
  16718         title = {The Theory of Games and Economic Behavior},
  16719         year = {1944},
  16720         pages = {0--776},
  16721         publisher = {Princeton University Press},
  16722         organization = {Princeton University Press},
  16723         edition = {60th},
  16724         address = {Princeton, New Jersey, USA},
  16725         www_section = {economic behavior, games, theory},
  16726         isbn = {978-0-691-13061-3},
  16727         url = {http://www.archive.org/details/theoryofgamesand030098mbp},
  16728         www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Neumann\%20\%26\%20Morgenstern\%20-\%20Theory\%20of\%20Games\%20and\%20Economic\%20Behavior.pdf},
  16729         author = {John von Neumann and Oskar Morgenstern}
  16730 }
  16731 @phdthesis {jayarama2015,
  16732         title = {Publish/Subscribe for Large-Scale Social Interaction: Design, Analysis and Resource Provisioning},
  16733         type = {Doctor of Philosophy},
  16734         year = {2015},
  16735         month = mar,
  16736         school = {University of Oslo},
  16737         www_section = {publish-subscribe, pubsub, social interaction, messaging, multicast},
  16738         www_pdf_url = {https://www.duo.uio.no/bitstream/handle/10852/43117/1595-Setty-DUO-Thesis.pdf},
  16739         www_tags = selected,
  16740         author = {Vinay Jayarama Setty}
  16741 }