aboutsummaryrefslogtreecommitdiff
path: root/src/dht/gnunet-service-dht.c
diff options
context:
space:
mode:
authorNathan S. Evans <evans@in.tum.de>2010-09-07 14:33:14 +0000
committerNathan S. Evans <evans@in.tum.de>2010-09-07 14:33:14 +0000
commita6f9e3d3b78cb1e1333965620e957ffeed709ed0 (patch)
treed26f26029a9784c16293998504003412cf8dde28 /src/dht/gnunet-service-dht.c
parent13d8744f99d41435783f28fd683784705194a281 (diff)
downloadgnunet-a6f9e3d3b78cb1e1333965620e957ffeed709ed0.tar.gz
gnunet-a6f9e3d3b78cb1e1333965620e957ffeed709ed0.zip
add republish support in dht (default once an hour, just like kademlia), improved next hop selection (or so I think), use 'TrialInfo' struct in dhtlog testcase
Diffstat (limited to 'src/dht/gnunet-service-dht.c')
-rw-r--r--src/dht/gnunet-service-dht.c276
1 file changed, 230 insertions, 46 deletions
diff --git a/src/dht/gnunet-service-dht.c b/src/dht/gnunet-service-dht.c
index 5150321df..5f08c8a0d 100644
--- a/src/dht/gnunet-service-dht.c
+++ b/src/dht/gnunet-service-dht.c
@@ -44,7 +44,7 @@
44 44
45#define PRINT_TABLES GNUNET_NO 45#define PRINT_TABLES GNUNET_NO
46 46
47#define REAL_DISTANCE GNUNET_YES 47#define REAL_DISTANCE GNUNET_NO
48 48
49#define EXTRA_CHECKS GNUNET_NO 49#define EXTRA_CHECKS GNUNET_NO
50/** 50/**
@@ -101,6 +101,11 @@
101/** 101/**
102 * Default replication parameter for find peer messages sent by the dht service. 102 * Default replication parameter for find peer messages sent by the dht service.
103 */ 103 */
104#define DHT_DEFAULT_PUT_REPLICATION 4
105
106/**
107 * Default replication parameter for find peer messages sent by the dht service.
108 */
104#define DHT_DEFAULT_FIND_PEER_REPLICATION 4 109#define DHT_DEFAULT_FIND_PEER_REPLICATION 4
105 110
106/** 111/**
@@ -614,6 +619,20 @@ struct RecentRequest
614 GNUNET_SCHEDULER_TaskIdentifier remove_task; 619 GNUNET_SCHEDULER_TaskIdentifier remove_task;
615}; 620};
616 621
622struct RepublishContext
623{
624 /**
625 * Key to republish.
626 */
627 GNUNET_HashCode key;
628
629 /**
630 * Type of the data.
631 */
632 unsigned int type;
633
634};
635
617/** 636/**
618 * Which kind of convergence will we be using? 637 * Which kind of convergence will we be using?
619 */ 638 */
@@ -634,23 +653,25 @@ static struct FindPeerMessageContext find_peer_context;
634 * to closest peer; initially send requests to 3 653 * to closest peer; initially send requests to 3
635 * peers. 654 * peers.
636 */ 655 */
637static int strict_kademlia; 656static unsigned int strict_kademlia;
638 657
639/** 658/**
640 * Routing option to end routing when closest peer found. 659 * Routing option to end routing when closest peer found.
641 */ 660 */
642static int stop_on_closest; 661static unsigned int stop_on_closest;
643 662
644/** 663/**
645 * Routing option to end routing when data is found. 664 * Routing option to end routing when data is found.
646 */ 665 */
647static int stop_on_found; 666static unsigned int stop_on_found;
648 667
649/** 668/**
650 * Whether DHT needs to manage find peer requests, or 669 * Whether DHT needs to manage find peer requests, or
651 * an external force will do it on behalf of the DHT. 670 * an external force will do it on behalf of the DHT.
652 */ 671 */
653static int do_find_peer; 672static unsigned int do_find_peer;
673
674static unsigned int use_real_distance;
654 675
655/** 676/**
656 * How many peers have we added since we sent out our last 677 * How many peers have we added since we sent out our last
@@ -1299,20 +1320,16 @@ update_core_preference (void *cls,
1299 * @param peer identifies the peer 1320 * @param peer identifies the peer
1300 * @param bpm_in set to the current bandwidth limit (receiving) for this peer 1321 * @param bpm_in set to the current bandwidth limit (receiving) for this peer
1301 * @param bpm_out set to the current bandwidth limit (sending) for this peer 1322 * @param bpm_out set to the current bandwidth limit (sending) for this peer
1302 * @param latency current latency estimate, "FOREVER" if we have been
1303 * disconnected
1304 * @param amount set to the amount that was actually reserved or unreserved; 1323 * @param amount set to the amount that was actually reserved or unreserved;
1305 * either the full requested amount or zero (no partial reservations) 1324 * either the full requested amount or zero (no partial reservations)
1306 * @param preference current traffic preference for the given peer 1325 * @param preference current traffic preference for the given peer
1307 */ 1326 */
1308static void 1327static void
1309update_core_preference_finish (void *cls, 1328update_core_preference_finish (void *cls,
1310 const struct 1329 const struct GNUNET_PeerIdentity * peer,
1311 GNUNET_PeerIdentity * peer,
1312 struct GNUNET_BANDWIDTH_Value32NBO bpm_in, 1330 struct GNUNET_BANDWIDTH_Value32NBO bpm_in,
1313 struct GNUNET_BANDWIDTH_Value32NBO bpm_out, 1331 struct GNUNET_BANDWIDTH_Value32NBO bpm_out,
1314 int amount, 1332 int amount, uint64_t preference)
1315 uint64_t preference)
1316{ 1333{
1317 struct PeerInfo *peer_info = cls; 1334 struct PeerInfo *peer_info = cls;
1318 peer_info->info_ctx = NULL; 1335 peer_info->info_ctx = NULL;
@@ -1325,13 +1342,18 @@ update_core_preference (void *cls,
1325{ 1342{
1326 struct PeerInfo *peer = cls; 1343 struct PeerInfo *peer = cls;
1327 uint64_t preference; 1344 uint64_t preference;
1328 1345 unsigned int matching;
1329 if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN) 1346 if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
1330 { 1347 {
1331 return; 1348 return;
1332 } 1349 }
1333 1350 matching = matching_bits(&my_identity.hashPubKey, &peer->id.hashPubKey);
1334 preference = 2 << matching_bits(&my_identity.hashPubKey, &peer->id.hashPubKey); 1351 if (matching >= 64)
1352 {
1353 GNUNET_log(GNUNET_ERROR_TYPE_WARNING, "Peer identifier matches by %u bits, only shifting as much as we can!\n", matching_bits);
1354 matching = 63;
1355 }
1356 preference = 1LL << matching;
1335 peer->info_ctx = GNUNET_CORE_peer_change_preference (sched, cfg, 1357 peer->info_ctx = GNUNET_CORE_peer_change_preference (sched, cfg,
1336 &peer->id, 1358 &peer->id,
1337 GNUNET_TIME_relative_get_forever(), 1359 GNUNET_TIME_relative_get_forever(),
@@ -1672,7 +1694,10 @@ void schedule_ping_messages()
1672/** 1694/**
1673 * Attempt to add a peer to our k-buckets. 1695 * Attempt to add a peer to our k-buckets.
1674 * 1696 *
1675 * @param peer, the peer identity of the peer being added 1697 * @param peer the peer identity of the peer being added
1698 * @param bucket the bucket that we want this peer to go in
1699 * @param latency transport latency of this peer
1700 * @param distance transport distance to this peer
1676 * 1701 *
1677 * @return NULL if the peer was not added, 1702 * @return NULL if the peer was not added,
1678 * pointer to PeerInfo for new peer otherwise 1703 * pointer to PeerInfo for new peer otherwise
@@ -1859,7 +1884,10 @@ static int consider_peer (struct GNUNET_PeerIdentity *peer)
1859 * Main function that handles whether or not to route a result 1884 * Main function that handles whether or not to route a result
1860 * message to other peers, or to send to our local client. 1885 * message to other peers, or to send to our local client.
1861 * 1886 *
1887 * @param cls closure (unused, always should be NULL)
1862 * @param msg the result message to be routed 1888 * @param msg the result message to be routed
1889 * @param message_context context of the message we are routing
1890 *
1863 * @return the number of peers the message was routed to, 1891 * @return the number of peers the message was routed to,
1864 * GNUNET_SYSERR on failure 1892 * GNUNET_SYSERR on failure
1865 */ 1893 */
@@ -2176,7 +2204,7 @@ remove_recent_find_peer(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc
2176 * 2204 *
2177 */ 2205 */
2178static void 2206static void
2179handle_dht_find_peer (void *cls, 2207handle_dht_find_peer (void *cls,
2180 const struct GNUNET_MessageHeader *find_msg, 2208 const struct GNUNET_MessageHeader *find_msg,
2181 struct DHT_MessageContext *message_context) 2209 struct DHT_MessageContext *message_context)
2182{ 2210{
@@ -2290,6 +2318,15 @@ handle_dht_find_peer (void *cls,
2290 GNUNET_free(find_peer_result); 2318 GNUNET_free(find_peer_result);
2291} 2319}
2292 2320
2321/**
2322 * Task used to republish data.
2323 * Forward declaration; function call loop.
2324 *
2325 * @param cls closure (a struct RepublishContext)
2326 * @param tc runtime context for this task
2327 */
2328static void
2329republish_content(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc);
2293 2330
2294/** 2331/**
2295 * Server handler for initiating local dht put requests 2332 * Server handler for initiating local dht put requests
@@ -2306,6 +2343,8 @@ handle_dht_put (void *cls,
2306 struct GNUNET_DHT_PutMessage *put_msg; 2343 struct GNUNET_DHT_PutMessage *put_msg;
2307 size_t put_type; 2344 size_t put_type;
2308 size_t data_size; 2345 size_t data_size;
2346 int ret;
2347 struct RepublishContext *put_context;
2309 2348
2310 GNUNET_assert (ntohs (msg->size) >= 2349 GNUNET_assert (ntohs (msg->size) >=
2311 sizeof (struct GNUNET_DHT_PutMessage)); 2350 sizeof (struct GNUNET_DHT_PutMessage));
@@ -2357,9 +2396,19 @@ handle_dht_put (void *cls,
2357 2396
2358 increment_stats(STAT_PUTS_INSERTED); 2397 increment_stats(STAT_PUTS_INSERTED);
2359 if (datacache != NULL) 2398 if (datacache != NULL)
2360 GNUNET_DATACACHE_put (datacache, &message_context->key, data_size, 2399 {
2361 (char *) &put_msg[1], put_type, 2400 ret = GNUNET_DATACACHE_put (datacache, &message_context->key, data_size,
2362 GNUNET_TIME_absolute_ntoh(put_msg->expiration)); 2401 (char *) &put_msg[1], put_type,
2402 GNUNET_TIME_absolute_ntoh(put_msg->expiration));
2403
2404 if (ret == GNUNET_YES)
2405 {
2406 put_context = GNUNET_malloc(sizeof(struct RepublishContext));
2407 memcpy(&put_context->key, &message_context->key, sizeof(GNUNET_HashCode));
2408 put_context->type = put_type;
2409 GNUNET_SCHEDULER_add_delayed (sched, DHT_REPUBLISH_FREQUENCY, &republish_content, put_context);
2410 }
2411 }
2363 else 2412 else
2364 GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, 2413 GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
2365 "`%s:%s': %s request received, but have no datacache!\n", 2414 "`%s:%s': %s request received, but have no datacache!\n",
@@ -2555,10 +2604,18 @@ am_closest_peer (const GNUNET_HashCode * target, struct GNUNET_CONTAINER_BloomFi
2555 * to a closer peer (if closer peers exist) or to choose 2604 * to a closer peer (if closer peers exist) or to choose
2556 * from the whole set of peers. 2605 * from the whole set of peers.
2557 * 2606 *
2607 * @param target the key of the request
2608 * @param bloom bloomfilter of peers this request has already traversed
2558 * @param hops number of hops this message has already traveled 2609 * @param hops number of hops this message has already traveled
2610 *
2611 * @return GNUNET_YES if we should try to route to a closer peer
2612 * than ourselves (and one exists), GNUNET_NO if we should
2613 * choose from the set of all known peers
2614 *
2559 */ 2615 */
2560int 2616int
2561route_closer (const GNUNET_HashCode *target, struct GNUNET_CONTAINER_BloomFilter *bloom, 2617route_closer (const GNUNET_HashCode *target,
2618 struct GNUNET_CONTAINER_BloomFilter *bloom,
2562 unsigned int hops) 2619 unsigned int hops)
2563{ 2620{
2564 unsigned int my_matching_bits; 2621 unsigned int my_matching_bits;
@@ -2632,30 +2689,32 @@ route_closer (const GNUNET_HashCode *target, struct GNUNET_CONTAINER_BloomFilter
2632 * 2689 *
2633 * @param target the key we are selecting a peer to route to 2690 * @param target the key we are selecting a peer to route to
2634 * @param bloom a bloomfilter containing entries this request has seen already 2691 * @param bloom a bloomfilter containing entries this request has seen already
2692 * @param hops the number of hops this message has already traversed
2635 * 2693 *
2636 * @return Peer to route to, or NULL on error 2694 * @return Peer to route to, or NULL on error
2637 */ 2695 */
2638static struct PeerInfo * 2696static struct PeerInfo *
2639select_peer (const GNUNET_HashCode * target, 2697select_peer (const GNUNET_HashCode * target,
2640 struct GNUNET_CONTAINER_BloomFilter *bloom, unsigned int hops) 2698 struct GNUNET_CONTAINER_BloomFilter *bloom,
2699 unsigned int hops)
2641{ 2700{
2642 unsigned int distance; 2701 unsigned int distance;
2643 unsigned int bc; 2702 unsigned int bc;
2644 unsigned int count; 2703 unsigned int count;
2645 unsigned int my_matching_bits; 2704 unsigned int my_matching_bits;
2646 unsigned long long largest_distance; 2705 unsigned long long largest_distance;
2647#if REAL_DISTANCE 2706 unsigned long long total_real_distance;
2648 unsigned long long total_distance; 2707 unsigned long long real_selected;
2649 unsigned long long selected;
2650#else
2651 unsigned int total_distance; 2708 unsigned int total_distance;
2652 unsigned int selected; 2709 unsigned int selected;
2653#endif 2710 unsigned int match_num;
2654
2655 int only_closer; 2711 int only_closer;
2656 struct PeerInfo *pos; 2712 struct PeerInfo *pos;
2657 struct PeerInfo *chosen; 2713 struct PeerInfo *chosen;
2658 char *temp_stat; 2714 char *temp_stat;
2715#if DEBUG_DHT_ROUTING
2716 double sum;
2717#endif
2659 2718
2660 my_matching_bits = matching_bits(target, &my_identity.hashPubKey); 2719 my_matching_bits = matching_bits(target, &my_identity.hashPubKey);
2661 only_closer = route_closer(target, bloom, hops); 2720 only_closer = route_closer(target, bloom, hops);
@@ -2723,15 +2782,14 @@ select_peer (const GNUNET_HashCode * target,
2723 if ((GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey)) && 2782 if ((GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey)) &&
2724 ((only_closer == GNUNET_NO) || (matching_bits(target, &pos->id.hashPubKey) >= my_matching_bits))) 2783 ((only_closer == GNUNET_NO) || (matching_bits(target, &pos->id.hashPubKey) >= my_matching_bits)))
2725 { 2784 {
2726#if REAL_DISTANCE /* Use the "real" distance as computed by the inverse_distance function */ 2785 if (GNUNET_YES == use_real_distance)
2727 /** The "real" distance is best for routing to the closest peer, but in practice 2786 total_real_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey);
2728 * (with our routing algorithm) it is usually better to use the squared bit distance. 2787 else
2729 * This gives us a higher probability of routing towards close peers. 2788 {
2730 */ 2789 /* Always add 1, in case 0 bits match! */
2731 total_distance += (unsigned long long)inverse_distance (target, &pos->id.hashPubKey); 2790 match_num = 1 + (matching_bits(target, &pos->id.hashPubKey) * matching_bits(target ,&pos->id.hashPubKey));
2732#else 2791 total_distance += match_num;
2733 total_distance += matching_bits(target, &pos->id.hashPubKey) * matching_bits(target ,&pos->id.hashPubKey); 2792 }
2734#endif
2735 } 2793 }
2736 #if DEBUG_DHT > 1 2794 #if DEBUG_DHT > 1
2737 GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, 2795 GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
@@ -2742,13 +2800,14 @@ select_peer (const GNUNET_HashCode * target,
2742 count++; 2800 count++;
2743 } 2801 }
2744 } 2802 }
2745 if (total_distance == 0) 2803
2804 if (((GNUNET_YES == use_real_distance) && (total_real_distance == 0)) || (total_distance == 0))
2746 { 2805 {
2747 increment_stats("# select_peer, total_distance == 0"); 2806 increment_stats("# select_peer, total_distance == 0");
2748 return NULL; 2807 return NULL;
2749 } 2808 }
2750 2809
2751 selected = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance); 2810#if DEBUG_DHT_ROUTING
2752 for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++) 2811 for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
2753 { 2812 {
2754 pos = k_buckets[bc].head; 2813 pos = k_buckets[bc].head;
@@ -2758,17 +2817,57 @@ select_peer (const GNUNET_HashCode * target,
2758 if ((GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey)) && 2817 if ((GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey)) &&
2759 ((only_closer == GNUNET_NO) || (matching_bits(target, &pos->id.hashPubKey) >= my_matching_bits))) 2818 ((only_closer == GNUNET_NO) || (matching_bits(target, &pos->id.hashPubKey) >= my_matching_bits)))
2760 { 2819 {
2761#if REAL_DISTANCE 2820 if (GNUNET_YES == use_real_distance)
2762 distance = inverse_distance (target, &pos->id.hashPubKey); 2821 {
2763#else 2822 GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "REAL: Choose peer with %d matching bits (%.2f percent)\n", matching_bits(&pos->id.hashPubKey, target), (inverse_distance (target, &pos->id.hashPubKey) / (double)total_real_distance) * 100);
2764 distance = matching_bits(target, &pos->id.hashPubKey) * matching_bits(target, &pos->id.hashPubKey); 2823 sum += inverse_distance (target, &pos->id.hashPubKey) / (double)total_real_distance;
2824 }
2825 else
2826 {
2827 match_num = 1 + (matching_bits(&pos->id.hashPubKey, target) * matching_bits(&pos->id.hashPubKey, target));
2828 GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Choose peer with %d matching bits (%.2f percent)\n", matching_bits(&pos->id.hashPubKey, target), (match_num / (double)total_distance) * 100);
2829 sum += match_num / (double)total_distance;
2830 }
2831 }
2832 pos = pos->next;
2833 count++;
2834 }
2835 }
2836 fprintf(stdout, "Sum is %f\n", sum);
2765#endif 2837#endif
2766 if (distance > selected) 2838
2839 real_selected = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, total_real_distance);
2840 selected = GNUNET_CRYPTO_random_u32 (GNUNET_CRYPTO_QUALITY_WEAK, total_distance);
2841
2842 for (bc = lowest_bucket; bc < MAX_BUCKETS; bc++)
2843 {
2844 pos = k_buckets[bc].head;
2845 count = 0;
2846 while ((pos != NULL) && (count < bucket_size))
2847 {
2848 if ((GNUNET_NO == GNUNET_CONTAINER_bloomfilter_test (bloom, &pos->id.hashPubKey)) &&
2849 ((only_closer == GNUNET_NO) || (matching_bits(target, &pos->id.hashPubKey) >= my_matching_bits)))
2850 {
2851 if (GNUNET_YES == use_real_distance)
2852 {
2853 distance = inverse_distance (target, &pos->id.hashPubKey);
2854 if (distance > real_selected)
2855 {
2856 GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Selected peer with %u matching bits to route to\n", distance);
2857 return pos;
2858 }
2859 real_selected -= distance;
2860 }
2861 else
2767 { 2862 {
2768 GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Selected peer with %u matching bits to route to\n", distance); 2863 distance = 1 + (matching_bits(target, &pos->id.hashPubKey) * matching_bits(target, &pos->id.hashPubKey));
2769 return pos; 2864 if (distance > selected)
2865 {
2866 GNUNET_log(GNUNET_ERROR_TYPE_DEBUG, "Selected peer with %u matching bits to route to\n", distance);
2867 return pos;
2868 }
2869 selected -= distance;
2770 } 2870 }
2771 selected -= distance;
2772 } 2871 }
2773 else 2872 else
2774 { 2873 {
@@ -3127,6 +3226,86 @@ static int route_message(void *cls,
3127} 3226}
3128 3227
3129/** 3228/**
3229 * Iterator for local get request results,
3230 *
3231 * @param cls closure for iterator, NULL
3232 * @param exp when does this value expire?
3233 * @param key the key this data is stored under
3234 * @param size the size of the data identified by key
3235 * @param data the actual data
3236 * @param type the type of the data
3237 *
3238 * @return GNUNET_OK to continue iteration, anything else
3239 * to stop iteration.
3240 */
3241static int
3242republish_content_iterator (void *cls,
3243 struct GNUNET_TIME_Absolute exp,
3244 const GNUNET_HashCode * key,
3245 uint32_t size, const char *data, uint32_t type)
3246{
3247
3248 struct DHT_MessageContext *new_msg_ctx;
3249 struct GNUNET_DHT_PutMessage *put_msg;
3250#if DEBUG_DHT
3251 GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
3252 "`%s:%s': Received `%s' response from datacache\n", my_short_id, "DHT", "GET");
3253#endif
3254 new_msg_ctx = GNUNET_malloc(sizeof(struct DHT_MessageContext));
3255
3256 put_msg =
3257 GNUNET_malloc (sizeof (struct GNUNET_DHT_PutMessage) + size);
3258 put_msg->header.type = htons (GNUNET_MESSAGE_TYPE_DHT_PUT);
3259 put_msg->header.size = htons (sizeof (struct GNUNET_DHT_PutMessage) + size);
3260 put_msg->expiration = GNUNET_TIME_absolute_hton(exp);
3261 put_msg->type = htons (type);
3262 memcpy (&put_msg[1], data, size);
3263 new_msg_ctx->unique_id = GNUNET_ntohll (GNUNET_CRYPTO_random_u64(GNUNET_CRYPTO_QUALITY_WEAK, (uint64_t)-1));
3264 new_msg_ctx->replication = ntohl (DHT_DEFAULT_PUT_REPLICATION);
3265 new_msg_ctx->msg_options = ntohl (0);
3266 new_msg_ctx->network_size = estimate_diameter();
3267 new_msg_ctx->peer = &my_identity;
3268 new_msg_ctx->bloom = GNUNET_CONTAINER_bloomfilter_init (NULL, DHT_BLOOM_SIZE, DHT_BLOOM_K);
3269 new_msg_ctx->hop_count = 0;
3270 new_msg_ctx->importance = DHT_DEFAULT_P2P_IMPORTANCE;
3271 new_msg_ctx->timeout = DHT_DEFAULT_P2P_TIMEOUT;
3272 increment_stats(STAT_PUT_START);
3273 route_message(cls, &put_msg->header, new_msg_ctx);
3274
3275 GNUNET_free(new_msg_ctx);
3276 GNUNET_free (put_msg);
3277 return GNUNET_OK;
3278}
3279
3280/**
3281 * Task used to republish data.
3282 *
3283 * @param cls closure (a struct RepublishContext)
3284 * @param tc runtime context for this task
3285 */
3286static void
3287republish_content(void *cls, const struct GNUNET_SCHEDULER_TaskContext *tc)
3288{
3289 struct RepublishContext *put_context = cls;
3290
3291 unsigned int results;
3292
3293 if (tc->reason == GNUNET_SCHEDULER_REASON_SHUTDOWN)
3294 {
3295 GNUNET_free(put_context);
3296 return;
3297 }
3298
3299 GNUNET_assert (datacache != NULL); /* If we have no datacache we never should have scheduled this! */
3300 results = GNUNET_DATACACHE_get(datacache, &put_context->key, put_context->type, &republish_content_iterator, NULL);
3301 if (results == 0) /* Data must have expired */
3302 GNUNET_free(put_context);
3303 else /* Reschedule task for next time period */
3304 GNUNET_SCHEDULER_add_delayed(sched, DHT_REPUBLISH_FREQUENCY, &republish_content, put_context);
3305
3306}
3307
3308/**
3130 * Find a client if it exists, add it otherwise. 3309 * Find a client if it exists, add it otherwise.
3131 * 3310 *
3132 * @param client the server handle to the client 3311 * @param client the server handle to the client
@@ -3926,6 +4105,11 @@ run (void *cls,
3926 do_find_peer = GNUNET_YES; 4105 do_find_peer = GNUNET_YES;
3927 4106
3928 if (GNUNET_YES == 4107 if (GNUNET_YES ==
4108 GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht",
4109 "use_real_distance"))
4110 use_real_distance = GNUNET_YES;
4111
4112 if (GNUNET_YES ==
3929 GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing", 4113 GNUNET_CONFIGURATION_get_value_yesno(cfg, "dht_testing",
3930 "mysql_logging_extended")) 4114 "mysql_logging_extended"))
3931 { 4115 {