commit 9c0f8a68f70c606dab9fd4506f2282ac2b5256f3
parent 9c7dc6a663654a00ae89306c9c129172c6387935
Author: Nils Gillmann <ng0@n0.is>
Date: Sun, 7 Oct 2018 23:14:54 +0000
gnunetbib.bib: fixes.
Signed-off-by: Nils Gillmann <ng0@n0.is>
Diffstat:
 M gnunetbib.bib | 74 ++++++++++++++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/gnunetbib.bib b/gnunetbib.bib
@@ -2131,34 +2131,10 @@ Beyond the theoretical interest in modeling KDFs, this work is intended to addre
school = {University of Helsinki},
type = {Master{\textquoteright}s Thesis},
address = {Helsinki},
- abstract = {As the virtual world grows more complex, finding a standard way for storing data becomes in-
-creasingly important. Ideally, each data item would be brought into the computer system only
-once. References for data items need to be cryptographically verifiable, so the data can maintain
-its identity while being passed around. This way there will be only one copy of the users family
-photo album, while the user can use multiple tools to show or manipulate the album. Copies of
-users data could be stored on some of his family members computer, some of his computers, but
-also at some online services which he uses. When all actors operate over one replicated copy of the
-data, the system automatically avoids a single point of failure. Thus the data will not disappear
-with one computer breaking, or one service provider going out of business. One shared copy also
-makes it possible to delete a piece of data from all systems at once, on users request.
-In our research we tried to find a model that would make data manageable to users, and make
-it possible to have the same data stored at various locations. We studied three systems, Persona,
-Freenet, and GNUnet, that suggest different models for protecting user data. The main application
-areas of the systems studied include securing online social networks, providing anonymous web,
-and preventing censorship in file-sharing. Each of the systems studied store user data on machines
-belonging to third parties. The systems differ in measures they take to protect their users from data
-loss, forged information, censorship, and being monitored. All of the systems use cryptography to
-secure names used for the content, and to protect the data from outsiders.
-Based on the gained knowledge, we built a prototype platform called Peerscape, which stores user
-data in a synchronized, protected database. Data items themselves are protected with cryptography
-against forgery, but not encrypted as the focus has been disseminating the data directly among
-family and friends instead of letting third parties store the information. We turned the synchronizing
-database into peer-to-peer web by revealing its contents through an integrated http server. The
-REST-like http API supports development of applications in javascript.
-
-To evaluate the platform{\textquoteright}s suitability for application development we wrote some simple applications, including a public chat room, bittorrent site, and a flower growing game. During our early tests we came to the conclusion that using the platform for simple applications works well. As web standards develop further, writing applications for the platform should become easier. Any system this complex will have its problems, and we are not expecting our platform to replace the existing web, but are fairly impressed with the results and consider our work important from the perspective of managing user data},
+ abstract = {As the virtual world grows more complex, finding a standard way for storing data becomes increasingly important. Ideally, each data item would be brought into the computer system only once. References for data items need to be cryptographically verifiable, so the data can maintain its identity while being passed around. This way there will be only one copy of the users family photo album, while the user can use multiple tools to show or manipulate the album. Copies of users data could be stored on some of his family members computer, some of his computers, but also at some online services which he uses. When all actors operate over one replicated copy of the data, the system automatically avoids a single point of failure. Thus the data will not disappear with one computer breaking, or one service provider going out of business. One shared copy also makes it possible to delete a piece of data from all systems at once, on users request. In our research we tried to find a model that would make data manageable to users, and make it possible to have the same data stored at various locations. We studied three systems, Persona, Freenet, and GNUnet, that suggest different models for protecting user data. The main application areas of the systems studied include securing online social networks, providing anonymous web, and preventing censorship in file-sharing. Each of the systems studied store user data on machines belonging to third parties. The systems differ in measures they take to protect their users from data loss, forged information, censorship, and being monitored. All of the systems use cryptography to secure names used for the content, and to protect the data from outsiders. Based on the gained knowledge, we built a prototype platform called Peerscape, which stores user data in a synchronized, protected database. Data items themselves are protected with cryptography against forgery, but not encrypted as the focus has been disseminating the data directly among family and friends instead of letting third parties store the information. We turned the synchronizing database into peer-to-peer web by revealing its contents through an integrated http server. The REST-like http API supports development of applications in javascript. To evaluate the platform{\textquoteright}s suitability for application development we wrote some simple applications, including a public chat room, bittorrent site, and a flower growing game. During our early tests we came to the conclusion that using the platform for simple applications works well. As web standards develop further, writing applications for the platform should become easier. Any system this complex will have its problems, and we are not expecting our platform to replace the existing web, but are fairly impressed with the results and consider our work important from the perspective of managing user data},
keywords = {content centric, ECRS, Freenet, GNUnet, P2P, Peerscape, Persona},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/twr-dp2pwa.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Toni Ruottu}
}
@conference {continual,
@@ -2167,6 +2143,7 @@ To evaluate the platform{\textquoteright}s suitability for application developme
year = {2010},
month = {June},
pages = {715--724},
+ www_section = {https://bibliography.gnunet.org},
author = {Dwork, Cynthia and Naor, Moni and Pitassi, Toniann and Rothblum, Guy N.}
}
@article {2010_4,
@@ -2187,6 +2164,7 @@ To evaluate the platform{\textquoteright}s suitability for application developme
school = {IRISA},
type = {phd},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/DistributingSocialApp2010Leroy.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Vincent Leroy}
}
@book {2010_5,
@@ -2204,7 +2182,7 @@ To evaluate the platform{\textquoteright}s suitability for application developme
www_section = {http://dx.doi.org/10.1007/978-3-642-14527-8_12},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/drac-pet2010.pdf},
author = {Danezis, George and Claudia Diaz and Troncoso, Carmela and Laurie, Ben},
- editor = {Atallah, MikhailJ. and Hopper, NicholasJ.}
+ editor = {Atallah, MikhailJ. and Hopper, Nicholas J.}
}
@conference {2010_6,
title = {Efficient DHT attack mitigation through peers{\textquoteright} ID distribution },
@@ -2217,6 +2195,7 @@ process follows a geometric distribution. We then use this result to detect DHT
method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead},
keywords = {attack detection, attack mitigation, distributed hash table, IDs distribution, KAD, Sybil attack},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}
}
@conference {Koch:2010:EPL:1827418.1827440,
@@ -2245,6 +2224,7 @@ method detects the most efficient attacks with a very small false-negative rate,
abstract = {While social networks provide news from old buddies, you can learn a lot more from people you do not know, but with whom you share many interests. We show in this paper how to build a network of anonymous social acquaintances using a gossip protocol we call Gossple, and how to leverage such a network to enhance navigation within Web 2.0 collaborative applications, {\`a} la LastFM and Delicious. Gossple nodes (users) periodically gossip digests of their interest profiles and compute their distances (in terms of interest) with respect to other nodes. This is achieved with little bandwidth and storage, fast convergence, and without revealing which profile is associated with which user. We evaluate Gossple on real traces from various Web 2.0 applications with hundreds of PlanetLab hosts and thousands of simulated nodes},
keywords = {gossple, social networks},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/gossple2010Bertier.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Marin Bertier and Davide Frey and Rachid Guerraoui and Anne-Marie Kermarrec and Vincent Leroy}
}
@article { duminuco:hierarchical,
@@ -2258,6 +2238,7 @@ method detects the most efficient attacks with a very small false-negative rate,
keywords = {dependability, erasure codes, peer-to-peer networking, reliability, storage},
doi = {10.1007/s12083-009-0044-8},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Duminuco\%20\%26\%20Biersack\%20-\%20Hierarchical\%20Codes.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Alessandro Duminuco and E W Biersack}
}
@conference {2010_7,
@@ -2299,6 +2280,7 @@ outperforms methods that use only one type of social content. Second, we present
pages = {3--18},
keywords = {autonetkit, emulation, netkit, network, testbed, virtualization},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/AutoNetkit_0.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Hung X. Nguyen and Roughan, Matthew and Knight, Simon and Nick Falkner and Maennel, Olaf and Randy Bush}
}
@mastersthesis {2010_8,
@@ -2333,6 +2315,7 @@ outperforms methods that use only one type of social content. Second, we present
pages = {401--411},
keywords = {distributed applications, emulation, GENI, PlanetLab, testbed},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/gush.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Jeannie R. Albrecht and Danny Yuxing Huang}
}
@conference {DBLP:conf/tridentcom/PeralaPML10,
@@ -2342,6 +2325,7 @@ outperforms methods that use only one type of social content. Second, we present
pages = {69--83},
keywords = {emulation, P2P, testbed},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/A\%20Novel\%20Testbed\%20for\%20P2P\%20Networks.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Pekka H. J. Per{\"a}l{\"a} and Jori P. Paananen and Milton Mukhopadhyay and Jukka-Pekka Laulajainen}
}
@conference {Locher:2010:PKN:2018057.2018085,
@@ -2370,6 +2354,7 @@ outperforms methods that use only one type of social content. Second, we present
pages = {141--152},
address = {Munich, Germany},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/fessi_iptcomm_2010.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Fessi, Ali and Nathan S Evans and Heiko Niedermayer and Ralph Holz}
}
@article {Isdal:2010:PPD:1851275.1851198,
@@ -2402,6 +2387,7 @@ outperforms methods that use only one type of social content. Second, we present
issn = {1533-5399},
doi = {http://doi.acm.org/10.1145/1667067.1667071},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/privacy_preserving_similarity.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Pang, Hweehwa and Shen, Jialie and Krishnan, Ramayya}
}
@conference {2010_10,
@@ -2470,6 +2456,7 @@ outperforms methods that use only one type of social content. Second, we present
year = {2010},
abstract = {Application-Layer Multicast has become a promising class of protocols since IP Multicast has not found wide area deployment in the Internet. Developing such protocols requires in-depth analysis of their properties even with large numbers of participants---a characteristic which is at best hard to achieve in real network experiments. Several well-known simulation frameworks have been developed and used in recent years, but none has proved to be fitting the requirements for analyzing large-scale application-layer networks. In this paper we propose the OverSim framework as a promising simulation environment for scalabe Application-Layer Multicast research. We show that OverSim is able to manage even overlays with several thousand participants in short time while consuming comparably little memory. We compare the framework{\textquoteright}s runtime properties with the two exemplary Application-Layer Mutlicast protocols Scribe and NICE. The results show that both simulation time and memory consumption grow linearly with the number of nodes in highly feasible dimensions},
keywords = {multicast, NICE, OverSim, Scribe},
+ www_section = {https://bibliography.gnunet.org},
author = {Stephan Krause and H{\"u}bsch, Christian}
}
@conference {Burkhart:2010:SPA:1929820.1929840,
@@ -2493,6 +2480,7 @@ outperforms methods that use only one type of social content. Second, we present
title = {Unleashing Tor, BitTorrent \& Co.: How to Relieve TCP Deficiencies in Overlays},
booktitle = {LCN 2010: Proceedings of the 35th IEEE Conference on Local Computer Networks},
year = {2010},
+ www_section = {https://bibliography.gnunet.org},
author = {Daniel Marks and Florian Tschorsch and Bjoern Scheuermann}
}
@conference {2010_13,
@@ -2519,6 +2507,7 @@ This work was partially funded as part of the Spontaneous Virtual Networks (SpoV
year = {2010},
note = {Demo},
address = {San Diego, CA, USA},
+ www_section = {https://bibliography.gnunet.org},
author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Sebastian Mies and Roland Bless and Oliver Waldhorst and Martina Zitterbart}
}
@conference {DBLP:conf/ccs/EdmanS09,
@@ -2611,18 +2600,22 @@ Five years ago a previous study examined the AS-level threat against client and
month = {April},
keywords = {Byzantine Resilient Sampling, Random Membership, random sampling},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Brahms-Comnet-Mar09.pdf , https://gnunet.org/git/bibliography.git/tree/docs/Brahms-rps-mar09.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Edward Bortnikov and Maxim Gurevich and Idit Keidar and Gabriel Kliot and Alexander Shraer}
}
@conference {DBLP:conf/sss/Kermarrec09,
title = {Challenges in Personalizing and Decentralizing the Web: An Overview of GOSSPLE},
year = {2009},
pages = {1--16},
+ www_section = {https://bibliography.gnunet.org},
author = {Anne-Marie Kermarrec}
}
@conference {2009_1,
title = {CLIO/UNISONO: practical distributed and overlay- wide network measurement},
+ booktitle = {CLIO/UNISONO: practical distributed and overlay-wide network measurement},
year = {2009},
abstract = {Building on previous work, we present an early version of our CLIO/UNISONO framework for distributed network measurements. CLIO/UNISONO is a generic measurement framework specifically aimed at overlays that need measurements for optimization purposes. In this talk, we briefly introduce the most important concepts and then focus on some more advanced mechanisms like measurements across connectivity domains and remote orders},
+ www_section = {https://bibliography.gnunet.org},
author = {Ralph Holz and Dirk Haage}
}
@conference {2009_2,
@@ -2634,6 +2627,7 @@ Five years ago a previous study examined the AS-level threat against client and
keywords = {collaboration, collusion-resistant distributed protocol, Computer applications, computer networks, cryptographic protocols, cryptography, data privacy, distributed computing, homorphic encryption computation, Laboratories, Portable media players, privacy-preserving computation, Privacy-preserving computation of trust, private multiparty summation protocol, scalar product protocol, secure multi-party computation, Secure scalar product, security, Superposed sending., Telephony, trust computation},
doi = {10.1109/NCA.2009.48},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/CollusionResistant2009Melchor.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Melchor, C.A. and Ait-Salem, B. and Gaborit, P.}
}
@article {DBLP:journals/tdp/NojimaK09,
@@ -2719,6 +2713,7 @@ We measure the empirical trade-off between accuracy and privacy in these adaptat
school = {Technische Universit{\"a}t M{\"u}nchen},
type = {Diplomarbeit},
address = {Munich, Germany},
+ www_section = {https://bibliography.gnunet.org},
author = {Markus Bucher}
}
@conference {Cholez:2009:ESA:1574663.1574671,
@@ -2803,6 +2798,7 @@ This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynam
keywords = {churn, distributed hash table, KAD, Kademlia},
issn = {1063-6692},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Long\%20Term\%20Study\%20of\%20Peer\%20Behavior\%20in\%20the\%20kad\%20DHT.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack}
}
@conference {1551621,
@@ -2870,6 +2866,7 @@ In addition, the most effective improvements appeared to be the application of
pseudo-random simulations and limiting simulation lengths, while other techniques have been shown to be less effective or even ineffective. Overall, when applying the best performing techniques, an AI with advanced playing strength has been created, such that further research is likely to push this performance to a strength of expert level},
keywords = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search, search techniques},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Thesis\%20-\%20F.Schadd.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Frederik Christiaan Schadd}
}
@book {2009_7,
@@ -2939,6 +2936,7 @@ In this paper we establish the optimal trade-off between the round complexity an
address = {TU Dresden, Germany },
keywords = {I2P},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/I2P-PET-CON-2009.1.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Lars Schimmer}
}
@conference {p2p09-peersim,
@@ -9756,7 +9754,7 @@ ict theory to analyse these systems. Under our assumptions, resource distributio
www_section = {http://dx.doi.org/10.1007/978-3-540-24676-3_1},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/EffecitvePrivateMatching2004Freedman.pdf},
author = {Freedman, MichaelJ. and Nissim, Kobbi and Pinkas, Benny},
- editor = {Cachin, Christian and Camenisch, JanL.}
+ editor = {Cachin, Christian and Camenisch, Jan L.}
}
@conference {Helmy04efficientresource,
title = {Efficient Resource Discovery in Wireless AdHoc Networks: Contacts Do Help},
@@ -14073,6 +14071,7 @@ We also present a scheme resilient to even pseudonymous profiling yet preserving
}
@conference {Michiardi01core:a,
title = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in Mobile Ad hoc Networks},
+ booktitle = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in Mobile Ad hoc Networks},
year = {2001},
pages = {107--121},
abstract = {Countermeasures for node misbehavior and selfishness are mandatory requirements in MANET. Selfishness that causes lack of node activity cannot be solved by classical security means that aim at verifying the correctness and integrity of an operation. We suggest a generic mechanism based on reputation to enforce cooperation among the nodes of a MANET to prevent selfish behavior. Each network entity keeps track of other entities{\textquoteright} collaboration using a technique called reputation. The reputation is calculated based on various types of information on each entity{\textquoteright}s rate of collaboration. Since there is no incentive for a node to maliciously spread negative information about other nodes, simple denial of service attacks using the collaboration technique itself are prevented. The generic mechanism can be smoothly extended to basic network functions with little impact on existing protocols},
@@ -14108,6 +14107,7 @@ In this paper, we propose a new cryptographic le system, which we call TCFS , as
}
@article {Luby01efficienterasure,
title = {Efficient erasure correcting codes},
+ booktitle = {Efficient erasure correcting codes},
journal = {IEEE Transactions on Information Theory},
volume = {47},
year = {2001},
@@ -14531,6 +14531,7 @@ This compilation represents the collected wisdom of today{\textquoteright}s peer
address = {Los Alamitos, CA, USA},
isbn = {0-7695-1503-7},
doi = {http://doi.ieeecomputersociety.org/10.1109/P2P.2001.990421},
+ www_section = {https://bibliography.gnunet.org},
author = {Sherif Botros and Steve Waterhouse}
}
@article {cheap-pseudonyms,
@@ -14693,6 +14694,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p
booktitle = {Workshop on Design Issues in Anonymity and Unobservability},
year = {2000},
pages = {1--9},
+ www_section = {https://bibliography.gnunet.org},
author = {Andreas Pfitzmann and Marit K{\"o}hntopp}
}
@conference {mitkuro,
@@ -14835,6 +14837,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p
type = {White Paper},
abstract = {This white paper, targeted at the technically savvy reader, offers a detailed look at the Freedom 2.0 System architecture. It is intended to give the reader a good understanding of the components that make up this system and the relationships between them, as well as to encourage analysis of the system},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/freedom2-arch.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Philippe Boucher and Adam Shostack and Ian Goldberg}
}
@conference {Clarke00freenet:a_0,
@@ -14863,6 +14866,7 @@ This book focuses on the principal-agent model, the "simple" situation where a p
}
@conference {Clarke00freenet:a,
title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System},
+ booktitle = {Freenet: A Distributed Anonymous Information Storage and Retrieval System},
year = {2000},
pages = {46--66},
abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and di$\#$cult for a node operator to determine or be held responsible for the actual physical contents of her own node},
@@ -14958,6 +14962,7 @@ Results based on simulations confirm that Overcast provides its added functional
abstract = {It is desirable to store data on data storage servers such as mail servers and file servers in encrypted form to reduce security and privacy risks. But this usually implies that one has to sacrifice functionality for security. For example, if a client wishes to retrieve only documents containing certain words, it was not previously known how to let the data storage server perform the search and answer the query without loss of data confidentiality},
isbn = {0-7695-0665-8},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/encrypteddata.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Dawn Xiaodong Song and David Wagner and Adrian Perrig}
}
@conference {Shields00aprotocol,
@@ -15029,6 +15034,7 @@ Results based on simulations confirm that Overcast provides its added functional
pages = {0--232},
keywords = {set reconciliation},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/reconcile.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Yaron Minsky and Ari Trachtenberg and Richard Zippel}
}
@conference {Papadopouli00sevendegrees,
@@ -15253,6 +15259,7 @@ This exposition presents a model to formally study such algorithms. This model,
school = {University of Edinburgh},
abstract = {This report describes an algorithm which if executed by a group of interconnected nodes will provide a robust key-indexed information storage and retrieval system with no element of central control or administration. It allows information to be made available to a large group of people in a similar manner to the "World Wide Web". Improvements over this existing system include:--No central control or administration required--Anonymous information publication and retrieval--Dynamic duplication of popular information--Transfer of information location depending upon demand There is also potential for this system to be used in a modified form as an information publication system within a large organisation which may wish to utilise unused storage space which is distributed across the organisation. The system{\textquoteright}s reliability is not guaranteed, nor is its efficiency, however the intention is that the efficiency and reliability will be sufficient to make the system useful, and demonstrate that},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.32.3665\&rep=rep1\&type=pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Ian Clarke}
}
@conference {301333,
@@ -15266,6 +15273,7 @@ This exposition presents a model to formally study such algorithms. This model,
isbn = {1-58113-099-6},
doi = {http://doi.acm.org/10.1145/301308.301333},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/flash-mix.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Jakobsson, Markus}
}
@conference {syverson99,
@@ -15747,6 +15755,7 @@ for future loss recovery. With the adaptive algorithm, our reliable multicast de
abstract = {An important issue to be addressed for the next generation of wirelessly-connected hand-held devices is battery longevity. In this paper we examine this issue from the point of view of the Network Interface (NI). In particular, we measure the power usage of two PDAs, the Apple Newton Messagepad and Sony Magic Link, and four NIs, the Metricom Ricochet Wireless Modem, the AT\&T Wavelan operating at 915 MHz and 2.4 GHz, and the IBM Infrared Wireless LAN Adapter. These measurements clearly indicate that the power drained by the network interface constitutes a large fraction of the total power used by the PDA. We also conduct trace-driven simulation experiments and show that by using applicationspecific policies it is possible to },
url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.8384},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/10.1.1.39.8384.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Mark Stemm and Paul Gauthier and Daishi Harada and Katz, Randy H.}
}
@conference {672869,
@@ -15982,6 +15991,7 @@ Elliptic Curve Public Key Cryptosystems is a valuable reference resource for res
keywords = {artificial intelligence, DCOP, PARC, partially adversial cooperation},
journal = {unknown},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Tech\%20report\%20-\%20DCOP\%20as\%20a\%20formal\%20model\%20of\%20PARC.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Makoto Yokoo and Edmund H. Durfee}
}
@conference {Deswarte91intrusiontolerance,
@@ -16172,6 +16182,7 @@ We also sketch applications of these signatures to a payment system, solving dis
}
@conference {Tanenbaum86usingsparse,
title = {Using Sparse Capabilities in a Distributed Operating System},
+ booktitle = {Using Sparse Capabilities in a Distributed Operating System},
year = {1986},
pages = {558--563},
abstract = {this paper we discuss a system, Amoeba, that uses capabilities for naming and protecting objects. In contrast to traditional, centralized operating systems, in which capabilities are managed by the operating system kernel, in Amoeba all the capabilities are managed directly by user code. To prevent tampering, the capabilities are protected cryptographically. The paper describes a variety of the issues involved, and gives four different ways of dealing with the access rights},
@@ -16310,6 +16321,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
issn = {0001-0782 },
doi = {http://doi.acm.org/10.1145/358549.358563},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/chaum-mix_0.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {David Chaum}
}
@article {10.1109/SP.1980.10006,
@@ -16327,6 +16339,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
}
@conference {1979,
title = {Compact Encodings of List Structure},
+ booktitle = {Compact Encodings of List Structure},
year = {1979},
publisher = {ACM New York, NY, USA},
organization = {ACM New York, NY, USA},
@@ -16357,6 +16370,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
address = {Cambridge, MA},
abstract = {This thesis examines the issues relating to non-discretionary access controls for decentralized computing systems. Decentralization changes the basic character of a computing system from a set of processes referencing a data base to a set of processes sending and receiving messages. Because messages must be acknowledged, operations that were read-only in a centralized system become read-write operations. As a result, the lattice model of non-discretionary access control, which mediates operations based on read versus read-write considerations, does not allow direct transfer of algorithms from centralized systems to decentralized systems. This thesis develops new mechanisms that comply with the lattice model and provide the necessary functions for effective decentralized computation. Secure protocols at several different levels are presented in the thesis. At the lowest level, a host or host protocol is shown that allows communication between hosts with effective internal security controls. Above this level, a host independent naming scheme is presented that allows generic naming of services in a manner consistent with the lattice model. The use of decentralized processing to aid in the downgrading of information is shown in the design of a secure intelligent terminal. Schemes are presented to deal with the decentralized administration of the lattice model, and with the proliferation of access classes as the user community of a decentralized system become more diverse. Limitations in the use of end-to-end encryption when used with the lattice model are identified, and a scheme is presented to relax these limitations for broadcast networks. Finally, a scheme is presented for forwarding authentication information between hosts on a network, without transmitting passwords (or their equivalent) over a network},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/MIT-LCS-TR-179.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Paul A. Karger}
}
@article {1977,
@@ -16366,6 +16380,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
year = {1977},
pages = {2--1},
keywords = {database_privacy differential_privacy stat},
+ www_section = {https://bibliography.gnunet.org},
author = {Dalenius, T.}
}
@article {1076,
@@ -16380,6 +16395,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
issn = {0018-9448},
doi = {10.1109/TIT.1976.1055638},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/IEEE\%20Trans.\%20on\%20Info.\%20-\%20New\%20directions\%20in\%20cryptography.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Whitfield Diffie and Martin E. Hellman}
}
@article {1971,
@@ -16405,6 +16421,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
abstract = {We consider the problem of partitioning the nodes of a graph with costs on its edges into subsets of given sizes so as to minimize the sum of the costs on all edges cut. This problem arises in several physical situations- for example, in assigning the components of electronic circuits to circuit boards to minimize the number of connections between boards. This paper presents a heuristic method for partitioning arbitrary graphs which is both effective in finding optimal partitions, and fast enough to be practical in solving large problems},
keywords = {heuristic method, partitioning graphs},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Kernighan\%20\%26\%20Lin\%20-\%20An\%20Efficient\%20Heuristic\%20Procedure\%20for\%20Partitioning\%20Graphs\%250A.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Brian W. Kernighan and S. Lin}
}
@article {1970_1,
@@ -16437,6 +16454,7 @@ The technique can also be used to form rosters of untraceable digital pseudonyms
year = {1968},
pages = {1243--1248},
www_pdf_url = {https://gnunet.org/git/bibliography.git/tree/docs/Science\%20-\%20Hardin\%20-\%20The\%20Tragedy\%20of\%20the\%20Commons.pdf},
+ www_section = {https://bibliography.gnunet.org},
author = {Garrett Hardin}
}
@article {1962,