From 6ab60d4920bb3199aee8cd872b930e9e3e808ba7 Mon Sep 17 00:00:00 2001
From: Nils Gillmann
Date: Sat, 19 May 2018 14:43:13 +0000
Subject: Restructure contrib folder.

contrib/pogen.sh -> bin/pogen.sh
bootstrap: Use new pogen location and execute it.
contrib/openvpn-tap32: Move to contrib/3rdparty/Windows/openvpn-tap32.
contrib/gnunet-logo*: Move to contrib/branding/logo/
Delete old patches in contrib, predating git.
Move buildbot data to contrib/ci/buildbot, move docker data to contrib/ci/docker.
Create contrib/conf and populate it with config files found in contrib and bin.
Move gns related data to contrib/gns.
delete contrib/repeat.sh
Move contrib/log.php into contrib/web/log.php.
Create folder contrib/scripts and use it for most scripts in contrib.
Remove trailing whitespace in doc/Makefile.am

Signed-off-by: Nils Gillmann
---
 contrib/scripts/coverage.sh | 14 +
 contrib/scripts/debug | 37 ++
 contrib/scripts/find_typedefs.py | 99 ++++++
 contrib/scripts/gdb-iterate-dll.py | 40 +++
 contrib/scripts/generate-monkey-db.sh | 17 +
 contrib/scripts/gnunet-chk.py.in | 383 +++++++++++++++++++++
 contrib/scripts/gnunet-logread/gnunet-logread | 198 +++++++++++
 contrib/scripts/gnunet-logread/gnunet-logread-ipc | 10 +
 .../gnunet-logread/gnunet-logread-ipc-sdedit | 60 ++++
 contrib/scripts/gnunet-suidfix | 27 ++
 contrib/scripts/gnunet_janitor.py.in | 78 +++++
 contrib/scripts/gnunet_pyexpect.py.in | 83 +++++
 contrib/scripts/process_log.sh | 30 ++
 contrib/scripts/pydiffer.py.in | 44 +++
 contrib/scripts/pydmesg | 75 ++++
 contrib/scripts/regression.sh | 54 +++
 contrib/scripts/removetrailingwhitespace.py.in | 15 +
 contrib/scripts/report.sh | 252 ++++++++++++++
 contrib/scripts/revisionary.sh | 98 ++++++
 contrib/scripts/terminate.py.in | 64 ++++
 contrib/scripts/testbed_cleanup.sh | 14 +
 contrib/scripts/texinfo-hacks.el | 18 +
 contrib/scripts/visualize_stats.sh | 86 +++++
 contrib/scripts/zonewalk-to-types.sh | 35 ++
 24 files changed, 1831 insertions(+)
 create mode 100755 contrib/scripts/coverage.sh
 create mode 100755 contrib/scripts/debug
 create mode 100644 contrib/scripts/find_typedefs.py
 create mode 100644 contrib/scripts/gdb-iterate-dll.py
 create mode 100755 contrib/scripts/generate-monkey-db.sh
 create mode 100755 contrib/scripts/gnunet-chk.py.in
 create mode 100755 contrib/scripts/gnunet-logread/gnunet-logread
 create mode 100755 contrib/scripts/gnunet-logread/gnunet-logread-ipc
 create mode 100755 contrib/scripts/gnunet-logread/gnunet-logread-ipc-sdedit
 create mode 100755 contrib/scripts/gnunet-suidfix
 create mode 100644 contrib/scripts/gnunet_janitor.py.in
 create mode 100644 contrib/scripts/gnunet_pyexpect.py.in
 create mode 100755 contrib/scripts/process_log.sh
 create mode 100644 contrib/scripts/pydiffer.py.in
 create mode 100755 contrib/scripts/pydmesg
 create mode 100755 contrib/scripts/regression.sh
 create mode 100755 contrib/scripts/removetrailingwhitespace.py.in
 create mode 100755 contrib/scripts/report.sh
 create mode 100755 contrib/scripts/revisionary.sh
 create mode 100644 contrib/scripts/terminate.py.in
 create mode 100755 contrib/scripts/testbed_cleanup.sh
 create mode 100644 contrib/scripts/texinfo-hacks.el
 create mode 100755 contrib/scripts/visualize_stats.sh
 create mode 100755 contrib/scripts/zonewalk-to-types.sh

(limited to 'contrib/scripts')

diff --git a/contrib/scripts/coverage.sh b/contrib/scripts/coverage.sh
new file mode 100755
index 000000000..dd6a6ab53
--- /dev/null
+++ b/contrib/scripts/coverage.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+# make sure configure was run with coverage enabled...
+lcov --directory . --zerocounters +make check +rm `find * -name "test_*.gc??"` `find * -name "perf_*.gc??"` +for n in `find * -name "*.gc??" | grep libs` +do + cd `dirname $n` + mv `basename $n` .. + cd - +done +lcov --directory . --capture --output-file app.info +mkdir -p doc/coverage +genhtml -o doc/coverage app.info diff --git a/contrib/scripts/debug b/contrib/scripts/debug new file mode 100755 index 000000000..3de2c9a14 --- /dev/null +++ b/contrib/scripts/debug @@ -0,0 +1,37 @@ +#!/bin/bash +# /proc/sys/kernel/core_pattern should be core.%p.%E + +COREPID=$1 + +COREFILES=`ls -1 *core.$COREPID* 2>/dev/null | wc -l` +COREFILE=`ls -1 *core.$COREPID* 2>/dev/null | head -n 1` + +if [ $COREFILES -gt 1 ]; then + echo "Multiple files, using $COREFILE" +fi + + +if [ $COREFILES -eq 0 ]; then + SERVICENAME=$1 + COREFILES=`ls -1 core.*.*$SERVICENAME 2>/dev/null | wc -l` + COREFILE=`ls -1 core.*.*$SERVICENAME 2>/dev/null | head -n 1` + + if [ $COREFILES -gt 1 ]; then + echo "Multiple files, using $COREFILE" + fi +fi + +if [ $COREFILES -eq 0 ]; then + echo "Core file for $1 not found" + exit 1 +fi + +echo "Using $COREFILE" + +EXECPATH=${COREFILE#*!} +EXECPATH=`echo $EXECPATH | sed -e 's/!/\//g'` +echo $EXECPATH +echo "" +echo "" + +gdb --core $COREFILE /$EXECPATH diff --git a/contrib/scripts/find_typedefs.py b/contrib/scripts/find_typedefs.py new file mode 100644 index 000000000..68f5c2782 --- /dev/null +++ b/contrib/scripts/find_typedefs.py @@ -0,0 +1,99 @@ +from __future__ import print_function +import os +import re +import sys + + +debug = False + + +def get_td_from_function_signature(line, file, num): + left_paren = line.find('(') + if left_paren > 0: + left_paren += 1 + li = line[left_paren:] + right_paren = line.find(')') + if right_paren > 0 and right_paren > left_paren and line[right_paren:].find('(') >= 0: + fname = line[:right_paren] + fname = fname.lstrip(' ').lstrip('*').lstrip(' ').rstrip(' ') + if len(fname) > 0: + if debug: + print("from {0}:{1}".format(file, num)) + print("-T {0}".format(fname)) + + +def get_td_from_simple_type(line, file, num): + line = line.rstrip(' ').rstrip('\t').rstrip(' ').rstrip('\t') + right_space = line.rfind(' ') + right_tab = line.rfind('\t') + sep = right_tab if right_tab > right_space else right_space + sep += 1 + tname = line[sep:] + tname = tname.lstrip('*') + if len(tname) > 0: + if debug: + print("from {0}:{1}".format(file, num)) + print("-T {0}".format(tname)) + + +def find_typedefs(file): + with open(file, 'rb') as f: + td = False + td_struct = False + td_level = 0 + td_line = [] + data = f.read() + for i, l in enumerate(data.splitlines(False)): + # Don't try to be too smart: only count lines that begin with 'typedef ' + l = l.rstrip(' ').rstrip('\t') + if len(l) == 0: + continue + if not td: + if l[:8] != 'typedef ': + continue + else: + td = True + if l[8:].lstrip(' ').lstrip('\t')[:6] == 'struct': + td_struct = True + if td_struct: + leftcbrace = l.find('{') + if leftcbrace >= 0: + if td_level == 0: + td_line.append(l[:leftcbrace]) + l = l[leftcbrace + 1:] + td_level += 1 + rightcbrace = l.rfind('}') + if rightcbrace >= 0: + td_level -= 1 + if td_level == 0: + td_line.append(l[rightcbrace + 1:]) + else: + td_line.append(l) + if len(l) > 0 and l[-1] == ';' and(not td_struct or td_level == 0): + td_line = ' '.join(td_line) + td_line = td_line[:-1] + if len(td_line) > 0: + if td_line[-1] == ')': + get_td_from_function_signature(td_line, file, i) + else: + get_td_from_simple_type(td_line, file, i) + td_line = [] + td = False + td_struct = False + 
td_level = 0 + + +def scan_dir(d): + for dirpath, dirs, files in os.walk(d): + for f in files: + if re.match(r'(?!lt_).+\.(c|cc|h)$', f): + file = os.path.join(dirpath, f) + find_typedefs(file) + + +if __name__ == '__main__': + if len(sys.argv[1:]) == 0: + arg = os.getcwd() + else: + arg = sys.argv[1] + scan_dir(arg) diff --git a/contrib/scripts/gdb-iterate-dll.py b/contrib/scripts/gdb-iterate-dll.py new file mode 100644 index 000000000..79d46aa96 --- /dev/null +++ b/contrib/scripts/gdb-iterate-dll.py @@ -0,0 +1,40 @@ +from gdb import * + + +def search_dll(head, field, match, pfield): + """ + Search in a DLL by iterates over it. + + head: name of the symbol denoting the head of the DLL + field: the field that should be search for match + match: the mathing value for field + pfield: the field whose value is to be printed for matched elements; None to + print all fields of the matched elemented + """ + + (symbol, _) = lookup_symbol(head) + if symbol is None: + print("Can't find symbol: " + head) + return + symbol_val = symbol.value() + while symbol_val: + symbol_val_def = symbol_val.dereference() + field_val = symbol_val_def[field] + if field_val.type.code == gdb.TYPE_CODE_INT: + val = int(field_val) + res = (match == val) + elif (field_val.type.code == gdb.TYPE_CODE_STRING) or (field_val.type.code == gdb.TYPE_CODE_ARRAY): + val = str(field_val) + res = (match == val) + elif (field_val.type.code == gdb.TYPE_CODE_TYPEDEF): + val = str(field_val) + res = match in val + else: + continue + + if res: + if pfield is None: + print(symbol_val_def) + else: + print(symbol_val_def[pfield]) + symbol_val = symbol_val_def["next"] diff --git a/contrib/scripts/generate-monkey-db.sh b/contrib/scripts/generate-monkey-db.sh new file mode 100755 index 000000000..2afe55501 --- /dev/null +++ b/contrib/scripts/generate-monkey-db.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +BASEPATH="$(dirname $0)" +OLDDIR="${pwd}" +GN_HOME="/usr/local/bin" + +export CC="cparser" +export CFLAGS="-m32 --seaspider" + +cd $BASEPATH/.. && ./configure --prefix=$GN_HOME --with-extractor=$GN_HOME --with-microhttpd=$GN_HOME --with-libgcrypt=$GN_HOME && make && seaspider +if test "$?" -ne 0 +then + echo "FAIL: building GNUnet" + exit 1 +fi + +cd $OLDDIR diff --git a/contrib/scripts/gnunet-chk.py.in b/contrib/scripts/gnunet-chk.py.in new file mode 100755 index 000000000..c976b2143 --- /dev/null +++ b/contrib/scripts/gnunet-chk.py.in @@ -0,0 +1,383 @@ +#!@PYTHON@ +# This file is part of GNUnet. +# (C) 2013, 2018 Christian Grothoff (and other contributing authors) +# +# GNUnet is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published +# by the Free Software Foundation; either version 3, or (at your +# option) any later version. +# +# GNUnet is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNUnet; see the file COPYING. If not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. 
+# +# File: gnunet-chk.py +# Brief: Computes GNUNET style Content Hash Key for a given file +# Author: Sree Harsha Totakura + +from hashlib import sha512 +import logging +import os +import getopt +import sys +from Crypto.Cipher import AES +from functools import reduce + + +# Defaults +DBLOCK_SIZE = (32 * 1024) # Data block size + +# Pick a multiple of 2 here to achive 8-byte alignment! We also +# probably want DBlocks to have (roughly) the same size as IBlocks. +# With SHA-512, the optimal value is 32768 byte / 128 byte = 256 (128 +# byte = 2 * 512 bits). DO NOT CHANGE! +CHK_PER_INODE = 256 + +CHK_HASH_SIZE = 64 # SHA-512 hash = 512 bits = 64 bytes + +CHK_QUERY_SIZE = CHK_HASH_SIZE # Again a SHA-512 hash + +GNUNET_FS_URI_PREFIX = "gnunet://fs/" # FS CHK URI prefix + +GNUNET_FS_URI_CHK_INFIX = "chk/" # FS CHK URI infix + + +def encode_data_to_string(data): + """Returns an ASCII encoding of the given data block like + GNUNET_STRINGS_data_to_string() function. + + data: A bytearray representing the block of data which has to be encoded + """ + echart = "0123456789ABCDEFGHIJKLMNOPQRSTUV" + assert (None != data) + assert (bytearray == type(data)) + size = len(data) + assert (0 != size) + vbit = 0 + wpos = 0 + rpos = 0 + bits = 0 + out = "" + while (rpos < size) or (vbit > 0): + if (rpos < size) and (vbit < 5): + bits = (bits << 8) | data[rpos] # eat 8 more bits + rpos += 1 + vbit += 8 + if (vbit < 5): + bits <<= (5 - vbit) # zero-padding + assert (vbit == ((size * 8) % 5)) + vbit = 5 + out += echart[(bits >> (vbit - 5)) & 31] + wpos += 1 + vbit -= 5 + assert (0 == vbit) + return out + + +def sha512_hash(data): + """ Returns the sha512 hash of the given data. + + data: string to hash + """ + hash_obj = sha512() + hash_obj.update(data) + return hash_obj.digest() + + +class AESKey: + """Class for AES Keys. Contains the main key and the initialization + vector. """ + + key = None # The actual AES key + iv = None # The initialization vector + cipher = None # The cipher object + KEY_SIZE = 32 # AES 256-bit key = 32 bytes + IV_SIZE = AES.block_size # Initialization vector size (= AES block size) + + def __init__(self, passphrase): + """Creates a new AES key. + + passphrase: string containing the passphrase to get the AES key and + initialization vector + """ + passphrase = bytearray(passphrase) + self.key = bytearray(self.KEY_SIZE) + self.iv = bytearray(self.IV_SIZE) + if (len(passphrase) > self.KEY_SIZE): + self.key = passphrase[:self.KEY_SIZE] + passphrase = passphrase[self.KEY_SIZE:] + if (len(passphrase) > self.IV_SIZE): + self.iv = passphrase[:self.IV_SIZE] + else: + self.iv[0:len(passphrase)] = passphrase + else: + self.key[0:len(passphrase)] = passphrase + self.key = str(self.key) + self.iv = str(self.iv) + assert (len(self.key) == self.KEY_SIZE) + assert (len(self.iv) == self.IV_SIZE) + + +def setup_aes_cipher_(aes_key): + """Initializes the AES object with settings similar to those in GNUnet. + + aes_key: the AESKey object + Returns the newly initialized AES object + """ + return AES.new(aes_key.key, AES.MODE_CFB, aes_key.iv, segment_size=128) + + +def aes_pad_(data): + """Adds padding to the data such that the size of the data is a multiple of + 16 bytes + + data: the data string + Returns a tuple:(pad_len, data). 
pad_len denotes the number of bytes added + as padding; data is the new data string with padded bytes at the end + """ + pad_len = len(data) % 16 + if (0 != pad_len): + pad_len = 16 - pad_len + pad_bytes = bytearray(15) + data += str(pad_bytes[:pad_len]) + return (pad_len, data) + + +def aes_encrypt(aes_key, data): + """Encrypts the given data using AES. + + aes_key: the AESKey object to use for AES encryption + data: the data string to encrypt + """ + (pad_len, data) = aes_pad_(data) + cipher = setup_aes_cipher_(aes_key) + enc_data = cipher.encrypt(data) + if (0 != pad_len): + enc_data = enc_data[:-pad_len] + return enc_data + + +def aes_decrypt(aes_key, data): + """Decrypts the given data using AES + + aes_key: the AESKey object to use for AES decryption + data: the data string to decrypt + """ + (pad_len, data) = aes_pad_(data) + cipher = setup_aes_cipher_(aes_key) + ptext = cipher.decrypt(data) + if (0 != pad_len): + ptext = ptext[:-pad_len] + return ptext + + +class Chk: + """Class for the content hash key.""" + key = None + query = None + fsize = None + + def __init__(self, key, query): + assert (len(key) == CHK_HASH_SIZE) + assert (len(query) == CHK_QUERY_SIZE) + self.key = key + self.query = query + + def setSize(self, size): + self.fsize = size + + def uri(self): + sizestr = repr(self.fsize) + if isinstance(self.fsize, int): + sizestr = sizestr[:-1] + return GNUNET_FS_URI_PREFIX + GNUNET_FS_URI_CHK_INFIX + \ + encode_data_to_string(bytearray(self.key)) + "." + \ + encode_data_to_string(bytearray(self.query)) + "." + \ + sizestr + + +def compute_depth_(size): + """Computes the depth of the hash tree. + + size: the size of the file whose tree's depth has to be computed + Returns the depth of the tree. Always > 0. + """ + depth = 1 + fl = DBLOCK_SIZE + while (fl < size): + depth += 1 + if ((fl * CHK_PER_INODE) < fl): + return depth + fl = fl * CHK_PER_INODE + return depth + + +def compute_tree_size_(depth): + """Calculate how many bytes of payload a block tree of the given depth MAY + correspond to at most (this function ignores the fact that some blocks will + only be present partially due to the total file size cutting some blocks + off at the end). + + depth: depth of the block. depth==0 is a DBLOCK. + Returns the number of bytes of payload a subtree of this depth may + correspond to. + """ + rsize = DBLOCK_SIZE + for cnt in range(0, depth): + rsize *= CHK_PER_INODE + return rsize + + +def compute_chk_offset_(depth, end_offset): + """Compute the offset of the CHK for the current block in the IBlock + above + + depth: depth of the IBlock in the tree (aka overall number of tree levels + minus depth); 0 == DBLOCK + end_offset: current offset in the overall file, at the *beginning* of the + block for DBLOCK (depth == 0), otherwise at the *end* of the + block (exclusive) + Returns the offset in the list of CHKs in the above IBlock + """ + bds = compute_tree_size_(depth) + if (depth > 0): + end_offset -= 1 + ret = end_offset / bds + return ret % CHK_PER_INODE + + +def compute_iblock_size_(depth, offset): + """Compute the size of the current IBLOCK. The encoder is triggering the + calculation of the size of an IBLOCK at the *end* (hence end_offset) of its + construction. The IBLOCK maybe a full or a partial IBLOCK, and this + function is to calculate how long it should be. + + depth: depth of the IBlock in the tree, 0 would be a DBLOCK, must be > 0 + (this function is for IBLOCKs only!) + offset: current offset in the payload (!) 
of the overall file, must be > 0 + (since this function is called at the end of a block). + Returns the number of elements to be in the corresponding IBlock + """ + assert (depth > 0) + assert (offset > 0) + bds = compute_tree_size_(depth) + mod = offset % bds + if mod is 0: + ret = CHK_PER_INODE + else: + bds /= CHK_PER_INODE + ret = mod / bds + if (mod % bds) is not 0: + ret += 1 + return ret + + +def compute_rootchk(readin, size): + """Returns the content hash key after generating the hash tree for the given + input stream. + + readin: the stream where to read data from + size: the size of data to be read + """ + depth = compute_depth_(size) + current_depth = 0 + chks = [None] * (depth * CHK_PER_INODE) # list buffer + read_offset = 0 + logging.debug("Begining to calculate tree hash with depth: " + repr(depth)) + while True: + if (depth == current_depth): + off = CHK_PER_INODE * (depth - 1) + assert (chks[off] is not None) + logging.debug("Encoding done, reading CHK `" + chks[off].query + \ + "' from " + repr(off) + "\n") + uri_chk = chks[off] + assert (size == read_offset) + uri_chk.setSize(size) + return uri_chk + if (0 == current_depth): + pt_size = min(DBLOCK_SIZE, size - read_offset) + try: + pt_block = readin.read(pt_size) + except IOError: + logging.warning("Error reading input file stream") + return None + else: + pt_elements = compute_iblock_size_(current_depth, read_offset) + pt_block = "" + pt_block = \ + reduce((lambda ba, chk: + ba + (chk.key + chk.query)), + chks[(current_depth - 1) * CHK_PER_INODE:][:pt_elements], + pt_block) + pt_size = pt_elements * (CHK_HASH_SIZE + CHK_QUERY_SIZE) + assert (len(pt_block) == pt_size) + assert (pt_size <= DBLOCK_SIZE) + off = compute_chk_offset_(current_depth, read_offset) + logging.debug("Encoding data at offset " + repr(read_offset) + \ + " and depth " + repr(current_depth) + " with block " \ + "size " + repr(pt_size) + " and target CHK offset " + \ + repr(current_depth * CHK_PER_INODE)) + pt_hash = sha512_hash(pt_block) + pt_aes_key = AESKey(pt_hash) + pt_enc = aes_encrypt(pt_aes_key, pt_block) + pt_enc_hash = sha512_hash(pt_enc) + chk = Chk(pt_hash, pt_enc_hash) + chks[(current_depth * CHK_PER_INODE) + off] = chk + if (0 == current_depth): + read_offset += pt_size + if (read_offset == size) or \ + (0 == (read_offset % (CHK_PER_INODE * DBLOCK_SIZE))): + current_depth += 1 + else: + if (CHK_PER_INODE == off) or (read_offset == size): + current_depth += 1 + else: + current_depth = 0 + + +def chkuri_from_path(path): + """Returns the CHK URI of the file at the given path. + + path: the path of the file whose CHK has to be calculated + """ + size = os.path.getsize(path) + readin = open(path, "rb") + chk = compute_rootchk(readin, size) + readin.close() + return chk.uri() + + +def usage(): + """Prints help about using this script.""" + print(""" +Usage: gnunet-chk.py [options] file +Prints the Content Hash Key of given file in GNUNET-style URI. 
+
+Options:
+    -h, --help      : prints this message
+""")
+
+
+if '__main__' == __name__:
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
+    except getopt.GetoptError as err:
+        print(err)
+        print("Exception occured")
+        usage()
+        sys.exit(2)
+    for option, value in opts:
+        if option in("-h", "--help"):
+            usage()
+            sys.exit(0)
+    if len(args) != 1:
+        print("Incorrect number of arguments passed")
+        usage()
+        sys.exit(1)
+    print(chkuri_from_path(args[0]))
diff --git a/contrib/scripts/gnunet-logread/gnunet-logread b/contrib/scripts/gnunet-logread/gnunet-logread
new file mode 100755
index 000000000..9b1c65401
--- /dev/null
+++ b/contrib/scripts/gnunet-logread/gnunet-logread
@@ -0,0 +1,198 @@
+#!@PERL@
+# helper tool to make gnunet logs more readable
+# try 'gnunet-logread -h' for usage
+
+use strict;
+use warnings;
+my $DEFAULT_SOCKET = '/tmp/gnunet-logread-ipc.sock';
+
+print STDERR < 0, ERROR => 1, WARNING => 2, INFO => 4, DEBUG => 8 );
+
+# Message type numbers to names
+my %msgtypes;
+my $prefix = $ENV{GNUNET_PREFIX} || '/usr';
+my $filename = "$prefix/include/gnunet/gnunet_protocols.h";
+$ipc = $opts{s} || $DEFAULT_SOCKET;
+
+if (open HEADER, $filename)
+{
+    while (<HEADER>)
+    {
+        $msgtypes{$2} = $1 if /^\s*#define\s+GNUNET_MESSAGE_TYPE_(\w+)\s+(\d+)/i;
+    }
+    close HEADER;
+} else {
+    warn <', $ipc or die "Cannot write to $ipc: $!";
+}
+
+if (exists $opts{f}) {
+    open(I, $ipc) or die "Cannot read from $ipc: $!";
+    &perform while <I>;
+    close I;
+} else {
+    &perform while <>;
+}
+fileno O and close O;
+exit;
+
+
+sub perform {
+    if (fileno O) {
+        my ($time, $type, $size, $from, $to, $level, $msg);
+        if (($time, $type, $size, $from, $to) =
+            /^([A-Z][a-z]{2}\ .[0-9]\ [0-9:]{8}(?:-[0-9]{6})?)\ util-client-.*\b
+             (?: Received | Transmitting )\ message \b.*?\b
+             type \s+ (\d+) \b.*?\b
+             size \s+ (\d+) \b.*?\b
+             (?: from \s+ (\S+)
+               | to \s+ (\S+) ) /x)
+        {
+            $from ||= $name;
+            $to ||= $name;
+            my ($time, $type, $size, $from, $to) = ($1, $2, $3,
+                                                    $4 || $name, $5 || $name);
+            my $msg = exists $msgtypes{$type} ? $msgtypes{$type} : $type;
+            my $ofh = select O;
+            print O "$time\t$from -> $to\t$msg ($size)\n";
+            $| = 1;
+            select $ofh;
+        }
+        if (($time, $level, $msg) =
+            /^([A-Z][a-z]{2}\ .[0-9]\ [0-9:]{8}(?:-[0-9]{6})?)
+             \s+\S+\s+(\S+)\s+(.+)/x
+            and (exists $levels{$level}
+                 && $levels{$level} <= $msg_level
+                 && (!defined $msg_regex || $msg =~ /$msg_regex/i)))
+        {
+            print O "$time\t$name\t$level: $msg\n";
+        }
+    }
+    return if $opts{x} and /$opts{x}/io;
+    return if $opts{i} and not /$opts{i}/io;
+
+    # Timestamp (e.g. Nov 01 19:36:11-384136)
+    s/^([A-Z][a-z]{2} .[0-9] [0-9:]{8}(?:-[0-9]{6})?)/YELLOW $1/e;
+
+    # Log levels
+    s/\b(ERROR )\b/RED $1/ex;
+    s/\b(WARNING)\b/YELLOW $1/ex;
+    s/\b(INFO )\b/GREEN $1/ex;
+    s/\b(DEBUG )\b/BRIGHT_BLACK $1/ex;
+
+    # Service names
+    # TODO: might read the list from $GNUNET_PREFIX/libexec/gnunet/
+    s/\b(multicast|psyc|psycstore|social)\b/BLUE $1/gex;
+
+    # Add message type names
+    s/(\s+type\s+)(\d+)/
+     $1 . BRIGHT_CYAN (exists $msgtypes{$2} ? $msgtypes{$2} : 'UNKNOWN') .
+     CYAN " ($2)"/gei;
+
+    # logread-ipc output
+    s/(\s+)([A-Z_]+)( \(\d+\))$/$1 . BRIGHT_CYAN $2 . CYAN $3/e;
+
+    print;
+}
+
+__END__
+
+=pod
+
+=head1 NAME
+
+gnunet-logread - a GNUnet log analyzer, colorizer and aggregator
+
+=head1 SYNOPSIS
+
+  |& $0 []
+    or
+  $0 [] []
+
+  Options:
+    -f            Follow input from IPC FIFO socket.
+
+   Regular screen output options:
+    -i            Include only messages that match .
+    -x            Exclude all messages that match .
+    -q            Quiet: Do not show usage advice to new users.
+
+   Options to forward messages to the IPC FIFO socket:
+    -n            Name of the component we are forwarding messages for.
+    -s            Default = $DEFAULT_SOCKET
+    -L            Minimum level of messages to forward:
+                  Log levels: NONE, ERROR, WARNING, INFO, DEBUG.
+    -m            Only forward messages matching a regular expression.
+
+  See 'perldoc gnunet-logread' for a longer explanation.
+
+=head1 MOTIVATION
+
+GNUnet debug logs are a tedious read, but given a complex system that we
+cannot run all parts of in a debugger all the time, some gathering and
+structuring of events and message passing is useful.
+
+At first, this tool simply makes logs easier to read. Both if viewed in
+real-time or taken from disk. Then it also allows to extract all message
+passing events from it and forward them to a special process that aggregates
+all message passing events and therefore helps you make sense of all the
+inter-process communication (IPC) happening between the various pieces of
+the GNUnet system beast.
+
+That master process is simply an extra gnunet-logread that you run in a
+separate window and adorn it with the '-f' flag. The submitting processes
+instead need to be given a '-n' flag. That is because from the GNUnet logs
+it isn't clear which process events belong to. For example you may be
+having events taking place in the 'util' subsystem of gnunet-psyc-service
+just as much as in the 'util' subsystem of gnunet-multicast-service. In
+order to make sense of them it is necessary to manually add that info. This
+could be remedied by extending the semantics of the GNUNET_log facility
+instead, but that is still subject to further consideration.
+
+=head1 AUTHORS
+
+tg & lynX
diff --git a/contrib/scripts/gnunet-logread/gnunet-logread-ipc b/contrib/scripts/gnunet-logread/gnunet-logread-ipc
new file mode 100755
index 000000000..72f9f47df
--- /dev/null
+++ b/contrib/scripts/gnunet-logread/gnunet-logread-ipc
@@ -0,0 +1,10 @@
+#!/bin/sh
+#
+# Usage: gnunet-logread-ipc | gnunet-logread
+#
+# ... obsoleted by gnunet-logread's new -f option that does the same thing
+
+# FIXME: Replace /tmp with our use of $TMPDIR and similar.
+ipc=${1:-/tmp/gnunet-logread-ipc.sock}
+test -e "$ipc" || mkfifo "$ipc"
+cat "$ipc"
diff --git a/contrib/scripts/gnunet-logread/gnunet-logread-ipc-sdedit b/contrib/scripts/gnunet-logread/gnunet-logread-ipc-sdedit
new file mode 100755
index 000000000..f8b7dc735
--- /dev/null
+++ b/contrib/scripts/gnunet-logread/gnunet-logread-ipc-sdedit
@@ -0,0 +1,60 @@
+#!@PERL@
+
+# 1. Start sdedit and enable 'RT diagram server' in 'Global preferences'.
+#
+# 2. Start this tool (see defaults below):
+#    gnunet-logread-ipc-sdedit -n buffer-name -i /path/to/ipc.sock -h -p
+#
+# 3. Start a gnunet-logread instance for each component with the -n option
+
+use strict;
+use warnings;
+
+use Getopt::Std;
+use IO::Socket::INET;
+use POSIX qw(mkfifo);
+
+my %opts;
+getopts ('i:n:h:p:', \%opts);
+
+my $ipc = $opts{i} || '/tmp/gnunet-logread-ipc.sock';
+my $name = $opts{n} || 'gnunet';
+my $host = $opts{h} || 'localhost';
+my $port = $opts{p} || 16001;
+my %svcs = map { $_ => 1 } @ARGV;
+
+my $sdedit = IO::Socket::INET->new(PeerAddr => $host,
+                                   PeerPort => $port,
+                                   Proto => 'tcp')
+    or die "Cannot connect to $host:$port: $!\n";
+
+print $sdedit "$name\n";
+print $sdedit "_t:time[e]\n";
+print $sdedit "$_:$_\[ap\] \"$_\"\n" for @ARGV;
+print $sdedit "_e:ext[e]\n";
+print $sdedit "\n";
+
+mkfifo $ipc, 0600 or die "$ipc: $!\n" unless -e $ipc;
+open IPC, '<', $ipc or die "$ipc: $!\n";
+$| = 1;
+while (<IPC>)
+{
+    print;
+    my ($time, $from, $to, $msg, $svc);
+    if (my ($time, $from, $to, $msg) =
+        /^([A-Z][a-z]{2}\ .[0-9]\ [0-9:]{8}(?:-[0-9]{6})?)\s+
+         (\S+)\s+ -> \s+(\S+)\s+ (\S+\s+ \(\d+\))/x)
+    {
+        $from = '_e' unless exists $svcs{$from};
+        $to = '_e' unless exists $svcs{$to};
+        print $sdedit "*0 _t\n$time\n*0\n", "$from:$to.$msg\n"
+    }
+    elsif (($time, $svc, $msg) =
+           /^([A-Z][a-z]{2}\ .[0-9]\ [0-9:]{8}(?:-[0-9]{6})?)\s+
+            (\S+)\s+(.+)/x)
+    {
+        print $sdedit "*0 _t\n$time\n*0\n", "*0 $svc\n$msg\n*0\n"
+    }
+}
+
+close IPC;
diff --git a/contrib/scripts/gnunet-suidfix b/contrib/scripts/gnunet-suidfix
new file mode 100755
index 000000000..992378966
--- /dev/null
+++ b/contrib/scripts/gnunet-suidfix
@@ -0,0 +1,27 @@
+#!/bin/sh
+#
+# "suidfix" is german and it means something like immediate suicide.
+
+# taken from dangole's lede config.. thx!
+suid_root_helpers="exit nat-server nat-client transport-bluetooth transport-wlan vpn" +libexec="${GNUNET_PREFIX}/lib/gnunet/libexec" + +chmodown_execbin() { + if [ -x $1 ]; then + if [ "$3" ]; then + chown $3 $1 2>/dev/null && chmod $2 $1 + else + chmod $2 $1 + fi + ls -l $1 + else + echo "Missing: $1" + fi +} + +for helper in $suid_root_helpers; do + chmodown_execbin ${libexec}/gnunet-helper-$helper u+s +done +chmodown_execbin ${libexec}/gnunet-helper-dns 4750 root:gnunetdns +chmodown_execbin ${libexec}/gnunet-service-dns 2750 gnunet:gnunetdns + diff --git a/contrib/scripts/gnunet_janitor.py.in b/contrib/scripts/gnunet_janitor.py.in new file mode 100644 index 000000000..74fc70886 --- /dev/null +++ b/contrib/scripts/gnunet_janitor.py.in @@ -0,0 +1,78 @@ +#!@PYTHON@ +# This file is part of GNUnet. +# (C) 2011 Christian Grothoff (and other contributing authors) +# +# GNUnet is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published +# by the Free Software Foundation; either version 2, or (at your +# option) any later version. +# +# GNUnet is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNUnet; see the file COPYING. If not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# Finds any gnunet processes still running in the system and kills them +# +# gnunet janitor can be used by invoking `make' like this: +# TESTS_ENVIRONMENT='${top_srcdir}/contrib/gnunet_janitor.py &&' make check + +from __future__ import print_function +import os +import re +import subprocess +import sys +import shutil +import time +import signal +import terminate + +if os.name == 'nt': + from win32com.client import GetObject + WMI = GetObject('winmgmts:') + +def get_process_list (): + result = [] + if os.name == 'nt': + processes = WMI.InstancesOf('Win32_Process') + for p in processes: + result.append ((p.Properties_('ProcessId').Value, re.sub (r'(.+)\.exe', r'\1', p.Properties_('Name').Value))) + else: + pids = [pid for pid in os.listdir('/proc') if pid.isdigit ()] + for pid in pids: + with open (os.path.join ('/proc', pid, 'cmdline'), 'rb') as p: + cmdline = p.read ().split ('\x00') + if len (cmdline) > 0: + result.append ((pid, cmdline[0])) + return result + +def main (): + procs = get_process_list () + gnunet_procs = [] + for p in procs: + if re.match (r'gnunet-.+', p[1]): + gnunet_procs.append (p) + for p in gnunet_procs: + if re.match (r'gnunet-service-arm', p[1]): + print ("killing arm process {0:5} {1}".format (p[0], p[1])) + try: + terminate.safe_terminate_process_by_pid (int (p[0]), 1) + except OSError as e: + print ("failed: {0}".format (e)) + pass + for p in gnunet_procs: + if not re.match (r'gnunet-service-arm', p[1]): + print ("killing non-arm process {0:5} {1}".format (p[0], p[1])) + try: + terminate.safe_terminate_process_by_pid (int (p[0]), 1) + except OSError as e: + print ("failed: {0}".format (e)) + pass + +if __name__ == '__main__': + sys.exit (main ()) diff --git a/contrib/scripts/gnunet_pyexpect.py.in b/contrib/scripts/gnunet_pyexpect.py.in new file mode 100644 index 000000000..cfeb06d8d --- /dev/null +++ b/contrib/scripts/gnunet_pyexpect.py.in @@ -0,0 +1,83 @@ +#!@PYTHON@ +# This file is part of GNUnet. 
+# (C) 2010 Christian Grothoff (and other contributing authors) +# +# GNUnet is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published +# by the Free Software Foundation; either version 2, or (at your +# option) any later version. +# +# GNUnet is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNUnet; see the file COPYING. If not, write to the +# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. +# +# Testcase for gnunet-peerinfo +from __future__ import print_function +import os +import re +import subprocess +import sys +import shutil +import time + +class pexpect (object): + def __init__ (self): + super (pexpect, self).__init__ () + + def spawn (self, stdin, arglist, *pargs, **kwargs): + env = kwargs.pop ('env', None) + if env is None: + env = os.environ.copy () + # This messes up some testcases, disable log redirection + env.pop ('GNUNET_FORCE_LOGFILE', None) + self.proc = subprocess.Popen (arglist, *pargs, env=env, **kwargs) + if self.proc is None: + print ("Failed to spawn a process {0}".format (arglist)) + sys.exit (1) + if stdin is not None: + self.stdo, self.stde = self.proc.communicate (stdin) + else: + self.stdo, self.stde = self.proc.communicate () + return self.proc + + def expect (self, s, r, flags=0): + stream = self.stdo if s == 'stdout' else self.stde + if isinstance (r, str): + if r == "EOF": + if len (stream) == 0: + return True + else: + print ("Failed to find `{1}' in {0}, which is `{2}' ({3})".format (s, r, stream, len (stream))) + sys.exit (2) + raise ValueError ("Argument `r' should be an instance of re.RegexObject or a special string, but is `{0}'".format (r)) + m = r.search (stream.decode(), flags) + if not m: + print ("Failed to find `{1}' in {0}, which is is `{2}'".format (s, r.pattern, stream)) + sys.exit (2) + stream = stream[m.end ():] + if s == 'stdout': + self.stdo = stream + else: + self.stde = stream + return m + + def read (self, s, size=-1): + stream = self.stdo if s == 'stdout' else self.stde + result = "" + if size < 0: + result = stream + new_stream = "" + else: + result = stream[0:size] + new_stream = stream[size:] + if s == 'stdout': + self.stdo = new_stream + else: + self.stde = new_stream + return result diff --git a/contrib/scripts/process_log.sh b/contrib/scripts/process_log.sh new file mode 100755 index 000000000..c25c515c2 --- /dev/null +++ b/contrib/scripts/process_log.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Usage: service should print "STARTING SERVICE (srvc) for peer [PEER]" where: +# - "srvc" is the service name (in lowercase, as in the log output). +# It cannot contain parenthesis in its name. +# - "PEER" is the peer ID. 
Should be 4 alfanumeric characters + +grep "STARTING SERVICE " log > __tmp_peers + +SED_EXPR="" +while read -r line; do + SRVC=`echo "$line" | sed -e 's/.*(\([^)]*\)).*/\1/'` + PEER=`echo "$line" | sed -e 's/.*\[\(....\)\].*/\1/'` + PID=`echo "$line" | sed -e "s/.*$SRVC-\([0-9]*\).*/\1/"` + echo "$SRVC $PID => $PEER" + + SED_EXPR="${SED_EXPR}s/$SRVC-\([a-z2]*\)-$PID/$SRVC \1 $PEER/;" + SED_EXPR="${SED_EXPR}s/$SRVC-$PID/$SRVC XXX $PEER/;" + SED_EXPR="${SED_EXPR}s/$SRVC-api-[0-9]/$SRVC-api- /;" +done < __tmp_peers +rm __tmp_peers + +sed -e "$SED_EXPR" log > .log +echo "$0 sed regex: $SED_EXPR" >> .log + +SIZE=`stat -c%s .log` + +if [[ "`ps aux | grep "kwrite .lo[g]"`" = "" && "$SIZE" < 10000000 ]]; then + kwrite .log --geometry 960x1140-960 & +fi diff --git a/contrib/scripts/pydiffer.py.in b/contrib/scripts/pydiffer.py.in new file mode 100644 index 000000000..10145371c --- /dev/null +++ b/contrib/scripts/pydiffer.py.in @@ -0,0 +1,44 @@ +#!@PYTHON@ +import os +import sys +import difflib +import filecmp + + +def getdiff(old, new): + diff = [] + with open(old) as a: + with open(new) as b: + for l in difflib.unified_diff(a.read().splitlines(), b.read().splitlines()): + diff.append(l) + return diff + + +def dc_getdiff(dc, old, new): + diff = [] + for f in dc.left_only: + diff.append("Only in {}: {}".format(old, f)) + for f in dc.right_only: + diff.append("Only in {}: {}".format(new, f)) + for f in dc.diff_files: + r = getdiff(os.path.join(old, f), os.path.join(new, f)) + diff.extend(r) + for dn, dc in dc.subdirs.items(): + r = dc_getdiff(dc, os.path.join(old, dn), os.path.join(new, dn)) + diff.extend(r) + return diff + + +def dcdiff(old, new): + dc = filecmp.dircmp(old, new) + diff = dc_getdiff(dc, old, new) + return diff + + +def main(): + for l in dcdiff(sys.argv[1], sys.argv[2]): + print(l) + + +if __name__ == '__main__': + main() diff --git a/contrib/scripts/pydmesg b/contrib/scripts/pydmesg new file mode 100755 index 000000000..d60e08fe3 --- /dev/null +++ b/contrib/scripts/pydmesg @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# coding=utf8 + +# Copyright (C) 2010 Saúl ibarra Corretgé +# + +""" +pydmesg: dmesg with human-readable timestamps +""" + +from __future__ import with_statement + +import re +import subprocess +import sys + +from datetime import datetime, timedelta + + +_datetime_format = "%Y-%m-%d %H:%M:%S" +_dmesg_line_regex = re.compile("^\[(?P