#!/usr/bin/python3
#
# Maximilian Wilhelm <max@rfc2324.org>
#  -- Tue 20 Jun 2017 06:40:18 PM CEST
#
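# Example invocation (the script filename used here is an assumption, not part of
# the source): merge two or more nodes.json files and write the result to stdout.
#
#   ./merge-nodes-json.py nodes-a.json nodes-b.json --pretty-print > merged-nodes.json
#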

import argparse
import json
import sys
import time

parser = argparse.ArgumentParser (description = 'Merge nodes.json files')
parser.add_argument ('files', help = 'Path for nodes.json file(s)', nargs = '+')
parser.add_argument ('--pretty-print', help = 'Pretty-print JSON output', action = 'store_true')
args = parser.parse_args ()

all_nodes = {}
uberdict = {}
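
# The merge below assumes a meshviewer/hopglass-style nodes.json layout; only the
# fields actually accessed by this script are sketched here:
#
#   {
#       "timestamp": "...",
#       "nodes": [
#           { "nodeinfo": { "node_id": "..." }, "lastseen": "2017-06-20T18:40:18+0200", ... },
#           ...
#       ]
#   }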

# Read all nodes lists into the all_nodes dict, thereby dropping any duplicate nodes.
for file_path in args.files:
    try:
        with open (file_path, 'rb') as fh:
            nodes = json.load (fh)
    # ValueError also covers json.JSONDecodeError for malformed input files.
    except (IOError, ValueError) as e:
        print (f"Error while reading file '{file_path}': {str(e)}", file = sys.stderr)
        sys.exit (1)

    for node in nodes['nodes']:
        node_id = node['nodeinfo']['node_id']

        # If node_id has already been seen, make sure to keep the newer entry.
        if node_id in all_nodes:
            try:
                node_lastseen = time.strptime (node['lastseen'], "%Y-%m-%dT%H:%M:%S%z")
                existing_node_lastseen = time.strptime (all_nodes[node_id]['lastseen'], "%Y-%m-%dT%H:%M:%S%z")

                # If the node information already stored in all_nodes is more
                # recent than the node we just found, don't overwrite it.
                if existing_node_lastseen > node_lastseen:
                    continue
            except Exception:
                # If parsing a timestamp fails, keep the entry we already have.
                continue

        all_nodes[node_id] = node

    # Carry over all top-level keys except 'nodes' (e.g. timestamp or version
    # fields); for these, the last file given on the command line wins.
    for key in nodes.keys ():
        if key != 'nodes':
            uberdict[key] = nodes[key]

uberdict['nodes'] = list (all_nodes.values ())

# Print the merged nodes.json to stdout.
if args.pretty_print:
    print (json.dumps (uberdict, sort_keys = True, indent = 4, separators = (',', ': ')))
else:
    print (json.dumps (uberdict))