import os
from functools import partial
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from scrapli import Scrapli
# from scrapli.driver import GenericDriver
# from scrapli.driver.core import IOSXEDriver
from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany
import re
import json


def cisco_version(collection, command_output, device_record, connection):
    print('run cisco_version')

    def device_audit():
        return {'os_flavour': operating_system,
                'image': image,
                'os_version': version,
                'chassis': chassis,
                'serial': serial}

    id = device_record['_id']
    output = command_output['output']
    if not output == 'error':
        parsed = output.genie_parse_output()
        if len(parsed) > 0:
            print('genie parser method')
            operating_system = parsed['version']['os']
            image = parsed['version']['system_image'].split(':')[1].replace('/', '')
            version = parsed['version']['version']
            chassis = parsed['version']['chassis']
            serial = parsed['version']['chassis_sn']
            record = device_audit()
            # print(record)
            filter = {'_id': id}
            result = collection.update_one(filter, {'$set': record}, upsert=True)
            return result
        else:
            print('manual parse method not written for this command')
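
# Illustrative only: a minimal sketch of how a handler such as cisco_version() appears to be
# driven. The Scrapli connection dict, the credentials, the 'show version' command and the
# MongoDB wiring are assumptions for this example; the shared handler signature
# (collection, command_output, device_record, connection) and the {'output': <scrapli Response>}
# or {'output': 'error'} shape are taken from the handlers in this module.
def _example_run_cisco_version(device_collection, device_record):
    connection = {
        'host': device_record['IPAddress'],  # inventory field also used by cisco_isakmp_policy()
        'auth_username': 'username',         # placeholder credentials
        'auth_password': 'password',
        'auth_strict_key': False,
        'platform': 'cisco_iosxe',
    }
    with Scrapli(**connection) as conn:
        response = conn.send_command('show version')  # command name assumed
    command_output = {'output': 'error' if response.failed else response}
    return cisco_version(device_collection, command_output, device_record, connection)
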

#### scrape 1)
# 'show isakmp sa detail'
# {'c_id': '20681', 'local_ip': '10.227.184.157', 'p1_ivrf': 'none', 'peer_ip': '10.229.4.74', 'p1_dh_group': '14', 'p1_encr_algo': 'aes', 'p1_hash_algo': 'sha', 'p1_auth_type': 'psk', 'p1_status': 'ACTIVE'}
# (a sketch of the merge-by-key write pattern used by every scrape follows this function)
def cisco_vpn_phase1(collection, command_output, device_record, connection):
    print('\ncisco_vpn_phase1')

    def process_p1(p1_dict, idx):
        global peer_count
        global p1_records
        sa = p1_dict['isakmp_stats']['IPv4'][idx]
        c_id = str(sa['c_id'])
        local_ip = sa['local_ip']
        peer_ip = sa['remote_ip']
        encr_algo = sa['encr_algo']
        hash_algo = sa['hash_algo']
        auth_type = sa['auth_type']
        dh_group = str(sa['dh_group'])
        status = sa['status']
        ivrf = sa['ivrf'] if 'ivrf' in sa else 'none'
        p1_record = {'c_id': c_id,
                     'local_ip': local_ip,
                     'p1_ivrf': ivrf,
                     'peer_ip': peer_ip,
                     'p1_dh_group': dh_group,
                     'p1_encr_algo': encr_algo,
                     'p1_hash_algo': hash_algo,
                     'p1_auth_type': auth_type,
                     'p1_status': status}
        # print(f'genie p1 {p1_records}')
        p1_records.append(p1_record)
        peer_count += 1
        print('phase1 processed %d\r' % peer_count, end="")

    device_name = device_record['DeviceName']
    output = command_output['output']
    device_table = collection['temp'][device_name]  # create/use-existing temp subcollection
    if not output == 'error':
        p1_dict = output.genie_parse_output()
        # print(json.dumps(p1_dict, indent=4))
        if len(p1_dict) > 0:
            global peer_count
            peer_count = 0
            global p1_records
            p1_records = []
            p1_scrape_idx = [t for t in p1_dict['isakmp_stats']['IPv4']]
            n_cores = os.cpu_count()
            partial_function = partial(process_p1, p1_dict)
            # with ThreadPoolExecutor(max_workers=1) as executor:  # debug
            with ThreadPoolExecutor(max_workers=n_cores) as executor:
                executor.map(partial_function, p1_scrape_idx)
            print(f'phase1 processed {peer_count}')
            # write to db
            if len(p1_records) > 0:
                requests = []
                for i in p1_records:
                    record = i
                    # requests.append(InsertOne(record))
                    filter = {'c_id': record['c_id']}
                    requests.append(UpdateMany(filter, {'$set': record}, upsert=True))
                result = device_table.bulk_write(requests)
                # print(result.bulk_api_result)
                return result
        else:
            print('phase1 no tunnel records')
    else:
        print('error returning command, check network connectivity')
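
# Illustrative only: every stage below writes into the same per-device temp sub-collection with
# UpdateMany(..., upsert=True), so later scrapes enrich the phase-1 documents created above by
# matching on a shared key: 'c_id' for scrapes 1) and 2), then 'local_ip'/'peer_ip'/'peer_port',
# then 'peer_ip'/'crypto_map'. A generic sketch of that merge-by-key pattern; the helper name is
# an assumption and is not used elsewhere in this module.
def _example_merge_by_key(device_table, records, key_fields):
    requests = [UpdateMany({k: r[k] for k in key_fields}, {'$set': r}, upsert=True)
                for r in records]
    return device_table.bulk_write(requests) if requests else None
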

#### scrape 2)
# 'show crypto session remote {ip} detail'
# {'local_ip': '10.225.112.42', 'local_port': '500', 'c_id': '11907', 'ipsec_flow': ['permit 47 host 10.225.112.42 host 10.227.36.18'], 'crypto_session_interface': 'Tunnel6', 'session_status': 'UP-ACTIVE', 'peer_ip': '10.227.36.18', 'peer_port': '500', 'p2_fvrf': 'none', 'peer_vpn_id': '10.227.36.18'}
# correlate to scrape 1) with key 'c_id' (an illustrative read-back sketch follows this function)
def cisco_crypto_session(collection, command_output, device_record, connection):
    print('\ncisco_crypto_session')

    def process_session(session):
        # debug with single thread
        # print('\n##########')
        # print(session)
        if not 'Invalid input detected at' in session:  # occurs with no match on peer ip
            # wipe all lines before the first match of 'Interface: '; this match delimits the text into per-interface scrapes
            scrape = ""
            tag_found = False
            for line in session.split('\n'):
                if not tag_found:
                    if 'Interface: ' in line:
                        scrape += f'{line}\n'
                        tag_found = True
                else:
                    scrape += f'{line}\n'
            # split the scrape into multiple interfaces, each interface entry may have many sessions
            interfaces = []
            try:
                if len(scrape) > 0:
                    for line in scrape.split('\n'):
                        # this can fail but only on a huge scrape and hard to see, mep-shared-rri-agg09
                        if 'Interface: ' in line:
                            interfaces.append(f'{line}\n')
                        else:
                            interfaces[-1] += f'{line}\n'
            except Exception as e:
                print(f'failed to process scrape: {e}')
                # print(scrape)
                pass
            # print(f'retrieved crypto session interface entries {len(interfaces)}')
            # loop interfaces, loop session attributes
            global task_count
            global session_records
            for i in interfaces:
                # print(i)
                peer_record_dict = {}
                session_record_dict = {}
                # global task_count
                all_sess = ""
                all_sess_found = False
                for line in i.split('\n'):
                    if 'Interface: ' in line:
                        crypto_session_interface = line.split(' ')[1]
                        peer_record_dict.update({'crypto_session_interface': crypto_session_interface})
                    if 'Profile: ' in line:
                        p1_profile = line.split(' ')[1]
                        peer_record_dict.update({'p1_profile': p1_profile})
                    if 'Session status: ' in line:
                        session_status = line.split('Session status: ')[1]
                        peer_record_dict.update({'session_status': session_status})
                    if 'Peer: ' in line:
                        peer_ip = line.split(' ')[1]
                        peer_port = line.split(' ')[3]
                        p2_fvrf = line.split(' ')[5].replace('(', '').replace(')', '')
                        # p1_vrf = line.split(' ')[7]
                        peer_record_dict.update({'peer_ip': peer_ip, 'peer_port': peer_port, 'p2_fvrf': p2_fvrf})
                    if 'Phase1_id: ' in line:
                        peer_vpn_id = line.lstrip().split(' ')[1].replace('(', '').replace(')', '')
                        peer_record_dict.update({'peer_vpn_id': peer_vpn_id})
                    # split all lines from 'IKEv1 SA: ' to end
                    if not all_sess_found:
                        if any(ike in line for ike in ['IKEv1 SA: ', 'IKE SA: ']):
                            all_sess += f'{line}\n'
                            all_sess_found = True
                    elif 'Session ID: ' in line:
                        pass
                    else:
                        all_sess += f'{line}\n'
                # break out each session, this can be P1 only with 'IKEv1 SA: ' or P1 + P2 with 'IKEv1 SA: ' and 'IPSEC FLOW: '
                all_sess_pairs = []
                for line in all_sess.split('\n'):
                    if any(ike in line for ike in ['IKEv1 SA: ', 'IKE SA: ']):
                        all_sess_pairs.append(f'{line}\n')
                    else:
                        all_sess_pairs[-1] += f'{line}\n'
                for asp in all_sess_pairs:
                    # print(asp)
                    ipsec_flow = []
                    for line in asp.split('\n'):
                        # if 'IKEv1 SA: ' in line:  # fails for older cisco
                        if any(ike in line for ike in ['IKEv1 SA: ', 'IKE SA: ']):
                            local_ip = line.lstrip().split(' ')[3].split('/')[0]
                            local_port = line.lstrip().split(' ')[3].split('/')[1]
                            session_record_dict.update({'local_ip': local_ip, 'local_port': local_port})
                        if 'connid:' in line:
                            c_id = line.lstrip().split(' ')[1].split(':')[1]
                            session_record_dict.update({'c_id': c_id})
                        if 'IPSEC FLOW: ' in line:
                            acl = line.lstrip().split('FLOW: ')[1]
                            ipsec_flow.append(acl)
                    if len(ipsec_flow) > 0:
                        session_record_dict.update({'ipsec_flow': ipsec_flow})
                    # merge peer record component with each session record
                    session_record_dict.update(peer_record_dict)
                    # print(session_record_dict)
                    # check for required fields for a complete/valid session record
                    if all(m in session_record_dict for m in ['local_ip', 'peer_ip', 'c_id']):
                        task_count += 1
                        print('session interface entries processed %d\r' % task_count, end="")
                        session_records.append(session_record_dict)

    # init vars, start
    device_name = device_record['DeviceName']
    device_table = collection['temp'][device_name]  # create/use-existing temp subcollection
    peer_ips = []
    sessions = []
    session_count = 0
    global session_records
    session_records = []
    global task_count
    task_count = 0
    # get scrapes
    with Scrapli(**connection) as conn:
        peers = conn.send_command('show crypto session brief')
        for line in peers.result.split('\n'):
            if len(line) > 0 and not any(exclude in line for exclude in ['ivrf = ', 'Peer ', 'Status: ', 'No IKE']):
                # print(line)
                format_line = ' '.join(line.split())
                ip = format_line.split(' ')[0]
                # print(ip)
                peer_ips.append(ip)
        if len(peer_ips) > 0:
            print(f'crypto session count {len(peer_ips)}')
            for ip in peer_ips:
                session_count += 1
                print('lookup crypto sessions %d\r' % session_count, end="")
                session = conn.send_command(f'show crypto session remote {ip} detail')
                # print(session.result)
                sessions.append(session.result)
    # process scrapes
    print(f'lookup crypto sessions {len(sessions)}')
    if len(sessions) > 0:
        n_cores = os.cpu_count()
        partial_function = partial(process_session)
        # with ThreadPoolExecutor(max_workers=1) as executor:  # debug
        with ThreadPoolExecutor(max_workers=n_cores) as executor:
            executor.map(partial_function, sessions)
        print(f'session interface entries processed {task_count}')
        # write to db
        if len(session_records) > 0:
            requests = []
            for i in session_records:
                record = i
                filter = {'c_id': record['c_id']}
                requests.append(UpdateMany(filter, {'$set': record}, upsert=True))
            result = device_table.bulk_write(requests)
            print(result.bulk_api_result)
            return result
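
# Illustrative only: once cisco_vpn_phase1() and cisco_crypto_session() have both run for a
# device, each tunnel document in the temp sub-collection carries fields from both scrapes,
# joined on 'c_id'. A hedged read-back sketch; the helper name is an assumption.
def _example_read_merged_tunnel(device_table, c_id):
    doc = device_table.find_one({'c_id': c_id}, {'_id': 0})
    # e.g. {'c_id': '11907', 'p1_encr_algo': 'aes', 'p1_status': 'ACTIVE',
    #       'crypto_session_interface': 'Tunnel6', 'session_status': 'UP-ACTIVE', ...}
    return doc
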

#### scrape 3)
# 'show crypto ipsec sa peer {p}'
# {'p2_interface': 'Tunnel6', 'local_ip': '10.225.112.42', 'peer_ip': '10.225.56.110', 'peer_port': '500', 'protected_vrf': 'none', 'pfs': 'N', 'p2_encr_algo': 'esp-256-aes', 'p2_hash_algo': 'esp-sha-hmac', 'p2_status': 'ACTIVE', 'crypto_map': 'Tunnel6-head-0'}
# correlate to (scrape 1) + scrape 2)) with keys 'local_ip' 'peer_ip' 'peer_port' (see the sketch after this function)
def cisco_vpn_phase2(collection, command_output, device_record, connection):
    print('\ncisco_vpn_phase2')

    def process_p2_scrapes(record):
        global p2_scrapes
        global empty_p2_scrapes
        global p2_records
        interfaces = []
        if not len(record) > 0:
            empty_p2_scrapes += 1
        else:
            for line in record.split('\n'):
                if 'interface: ' in line:
                    interfaces.append(f'{line}\n')
                else:
                    interfaces[-1] += f'{line}\n'
            # print(f'manual scrape tunnel interface count {len(interfaces)}')
            for int in interfaces:
                # reset interface vars
                for va in ['p2_interface', 'local_ip']:
                    if va in locals():
                        del va
                # get interface vars
                for line in int.split('\n'):
                    if 'interface' in line:
                        p2_interface = line.split(' ')[1]
                    if 'local addr' in line:
                        local_ip = line.split('addr ')[1]
                # strip up to 'protected vrf:' for the ivrf loop
                intf = ""
                tag_found = False
                for line in int.split('\n'):
                    if not tag_found:
                        if 'protected vrf:' in line:
                            intf += f'{line}\n'
                            tag_found = True
                    else:
                        intf += f'{line}\n'
                # loop vrfs
                vrfs = []
                for line in intf.split('\n'):
                    if 'protected vrf:' in line:
                        vrfs.append(f'{line}\n')
                    else:
                        vrfs[-1] += f'{line}\n'
                for v in vrfs:
                    # reset ivrf vars
                    for va in ['peer_ip', 'peer_port', 'vrf', 'pfs', 'transform', 'p2_encr_algo', 'p2_hash_algo', 'status', 'crypto_map']:
                        if va in locals():
                            del va
                    peer_ip_l = []
                    peer_port_l = []
                    vrf_l = []
                    pfs_l = []
                    transform_l = []
                    status_l = []
                    crypto_map_l = []
                    p2_record_dict = {}
                    # get vrf vars
                    for line in v.split('\n'):
                        if 'current_peer' in line:
                            peer_ip_l.append(line.lstrip(' ').split(' ')[1])
                            peer_port_l.append(line.lstrip(' ').split(' ')[3])
                        if 'PFS' in line:
                            pfs_l.append(line.lstrip(' ').split(' ')[2].split(',')[0].lower())
                        if 'transform' in line:
                            transform_l.append(line.lstrip(' ').split('transform: ')[1].split(' ,')[0])
                        if 'crypto map: ' in line:
                            crypto_map_l.append(line.lstrip(' ').split('crypto map: ')[1])
                        if 'Status' in line:
                            status_l.append(line.split('Status: ')[1].split('(')[0])
                        if 'protected vrf' in line:
                            vrf_l.append(line.split('protected vrf: ')[1].replace('(', '').replace(')', ''))
                    # write vrf vars to record dict
                    p2_record_dict.update({'p2_interface': p2_interface})
                    p2_record_dict.update({'local_ip': local_ip})
                    if len(peer_ip_l) > 0:
                        # peer_ip = peer_ip_l[0]
                        p2_record_dict.update({'peer_ip': peer_ip_l[0]})
                    if len(peer_port_l) > 0:
                        # peer_port = peer_port_l[0]
                        p2_record_dict.update({'peer_port': peer_port_l[0]})
                    if len(vrf_l) > 0:
                        # vrf = vrf_l[0]
                        p2_record_dict.update({'protected_vrf': vrf_l[0]})
                    if len(pfs_l) > 0:
                        # pfs = pfs_l[0].upper()
                        p2_record_dict.update({'pfs': pfs_l[0].upper()})
                    # else:
                    #     pfs = 'N'
                    if len(transform_l) > 0:
                        transform = transform_l[0]
                        # p2_encr_algo = transform.split(' ')[0]
                        p2_record_dict.update({'p2_encr_algo': transform.split(' ')[0]})
                        # p2_hash_algo = transform.split(' ')[1]
                        p2_record_dict.update({'p2_hash_algo': transform.split(' ')[1]})
                    if len(status_l) > 0:
                        # status = status_l[0]
                        p2_record_dict.update({'p2_status': status_l[0]})
                    if len(crypto_map_l) > 0:
                        # crypto_map = crypto_map_l[0]
                        p2_record_dict.update({'crypto_map': crypto_map_l[0]})
                    # print(p2_record_dict)
                    # check for required fields for a complete/valid p2 record
                    if all(include in p2_record_dict for include in ['local_ip', 'peer_ip', 'peer_port']):
                        # print(p2_record_dict)
                        p2_records.append(p2_record_dict)
            p2_scrapes += 1
            print('phase2 scrape processed %d\r' % p2_scrapes, end="")

    def get_p2_scrapes(peers, connection):
        global scrape_count
        peer_commands = [f'show crypto ipsec sa peer {p}' for p in set(peers)]
        print(f'phase2 peer commands prepared {len(peer_commands)}')
        p2scrapes = []
        if len(peers) > 0:
            try:
                with Scrapli(**connection) as conn:
                    for c in peer_commands:
                        p2scrape = conn.send_command(c)
                        p2scrapes.append(p2scrape)
                        scrape_count += 1
                        print('lookup phase2 peers %d\r' % scrape_count, end="")
                return p2scrapes
            except Exception as e:
                print(f'exception_type: {type(e).__name__}')
                # pass
                print('phase2 fallback parser method error collecting scrapes, possible incomplete records for this device')
                return p2scrapes
        else:
            print('phase2 fallback parser method error, no phase1 peers/tunnels')
            return p2scrapes

    # init vars
    device_name = device_record['DeviceName']
    device_table = collection['temp'][device_name]  # create/use-existing temp subcollection
    global scrape_count
    scrape_count = 0
    global p2_scrapes
    p2_scrapes = 0
    global empty_p2_scrapes
    empty_p2_scrapes = 0
    global p2_records
    p2_records = []
    # lookup all peer_ip
    peer_ip_query = device_table.find({"peer_ip": {"$exists": True}}, {"peer_ip": 1, "_id": 0})
    peers = [r['peer_ip'] for r in peer_ip_query]
    # get p2 scrapes
    result = get_p2_scrapes(peers, connection)
    print(f'lookup phase2 peers {scrape_count}')
    # process p2 scrapes
    if len(result) > 0:
        scrape_results = [s.result for s in result]
        n_cores = os.cpu_count()
        partial_function = partial(process_p2_scrapes)
        # with ThreadPoolExecutor(max_workers=1) as executor:  # debug
        with ThreadPoolExecutor(max_workers=n_cores) as executor:
            executor.map(partial_function, scrape_results)
        print(f'phase2 scrape processed {p2_scrapes}')
        print(f'empty phase2 scrapes {empty_p2_scrapes}')
        # write to db
        if len(p2_records) > 0:
            requests = []
            for i in p2_records:
                record = i
                filter = {'local_ip': record['local_ip'], 'peer_ip': record['peer_ip'], 'peer_port': record['peer_port']}
                requests.append(UpdateMany(filter, {'$set': record}, upsert=True))
            result = device_table.bulk_write(requests)
            print(result.bulk_api_result)
            return result
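
# Illustrative only: per the scrape 3) header above, phase-2 records are merged onto the session
# documents using the compound key 'local_ip'/'peer_ip'/'peer_port'. A hedged read-back sketch;
# the helper name and projection are assumptions.
def _example_find_phase2_for_session(device_table, session_doc):
    key = {k: session_doc[k] for k in ('local_ip', 'peer_ip', 'peer_port')}
    return device_table.find_one(key, {'_id': 0})
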

#### scrape 4)
# {'crypto_map': 'Tunnel6-head-0', 'peer_ip': '10.227.112.50', 'pfs': 'N', 'transform_sets': [{'name': 'TS-AES256-SHA', 'p2_encr_algo': 'esp-256-aes', 'p2_hash_algo': 'esp-sha-hmac'}, {'name': 'TS-3DES-SHA', 'p2_encr_algo': 'esp-3des', 'p2_hash_algo': 'esp-sha-hmac'}], 'crypto_map_interface': ['Tunnel6'], 'RRI_enabled': False, 'default_p2_3des': False}
# correlate to (scrape 1) + scrape 2) + scrape 3)) with keys 'peer_ip' 'crypto_map' (see the sketch after this function)
def cisco_crypto_map(collection, command_output, device_record, connection):
    print('\ncisco_crypto_map')

    def process_cryptomaps(cryptomap):
        # print(cryptomap)
        # loop lines in cryptomap entry
        global crypto_map_count
        global cryptomap_records
        tfs_found = False
        int_found = False
        tfset = []
        crypto_map_interface = []
        cryptomap_record_dict = {}
        for line in cryptomap.split('\n'):
            if 'Crypto Map "' in line:  # older variant of ios
                crypto_map = line.split(' ')[2].replace('"', '')
                cryptomap_record_dict.update({'crypto_map': crypto_map})
            if 'Crypto Map IPv4 "' in line:  # newer variant of ios
                crypto_map = line.split(' ')[3].replace('"', '')
                cryptomap_record_dict.update({'crypto_map': crypto_map})
            if 'ISAKMP Profile: ' in line:
                p1_profile = line.split(' ')[2]
                cryptomap_record_dict.update({'p1_profile': p1_profile})
            if 'Current peer: ' in line:
                peer_ip = line.split(' ')[2]
                cryptomap_record_dict.update({'peer_ip': peer_ip})
            # RRI devices use dynamic crypto map templates, the name of the crypto map may not match the template name CM-BML-RRI != CDM-BML-RRI
            if 'dynamic (created from dynamic map ' in line:
                # dynamic (created from dynamic map CDM-BML-RRI/200)
                crypto_map_template = line.split('dynamic map ')[1].split('/')[0]
                cryptomap_record_dict.update({'crypto_map_template': crypto_map_template})
            if 'PFS (Y/N): ' in line:
                pfs = line.split(' ')[2].upper()
                cryptomap_record_dict.update({'pfs': pfs})
            if not tfs_found:
                if 'Transform sets=' in line:
                    tfs_found = True
                    pass
            elif ' } ,' in line:
                tfs_name = line.split(' ')[0].split(':')[0]
                tfs_encr_algo = line.replace('  ', ' ').split(' ')[2]
                tfs_hash_algo = line.replace('  ', ' ').split(' ')[3]
                tfset.append({'name': tfs_name, 'p2_encr_algo': tfs_encr_algo, 'p2_hash_algo': tfs_hash_algo})
            else:
                tfs_found = False
            if 'Reverse Route Injection Enabled' in line:
                cryptomap_record_dict.update({'RRI_enabled': True})
            if not int_found:
                if 'Interfaces using crypto map ' in line:
                    int_found = True
                    pass
            else:
                if len(line) > 0:
                    crypto_map_interface.append(line)
        # add possible list items to cryptomap record
        if len(tfset) > 0:
            cryptomap_record_dict.update({'transform_sets': tfset})
        if len(crypto_map_interface) > 0:
            cryptomap_record_dict.update({'crypto_map_interface': crypto_map_interface})
        # catch absence of RRI
        if 'RRI_enabled' not in cryptomap_record_dict:
            cryptomap_record_dict.update({'RRI_enabled': False})
        # # DISABLE - transform_sets is dynamic, not the best source of truth
        # # determine if 1st/default P2 transform set is 3des
        # if 'transform_sets' in cryptomap_record_dict:
        #     if '3des' in cryptomap_record_dict['transform_sets'][0]['p2_encr_algo'].lower():
        #         cryptomap_record_dict.update({'default_p2_3des': True})
        #     else:
        #         cryptomap_record_dict.update({'default_p2_3des': False})
        # # print(cryptomap_record_dict)
        # check for required fields for a complete/valid cryptomap record (if the cryptomap has no peer_ip it has no use)
        if all(include in cryptomap_record_dict for include in ['peer_ip', 'crypto_map']):
            # print(cryptomap_record_dict)
            cryptomap_records.append(cryptomap_record_dict)
            crypto_map_count += 1
            print('cryptomaps processed %d\r' % crypto_map_count, end="")

    # start, init vars
    global crypto_map_count
    crypto_map_count = 0
    global cryptomap_records
    cryptomap_records = []
    device_name = device_record['DeviceName']
    device_table = collection['temp'][device_name]  # create/use-existing temp subcollection
    output = command_output['output']
    # print(output.result)
    if output == 'error':
        print('parse failure, output too large for screen scrape, consider a command targeted by peer')
    elif output == 'compound':
        print('cisco_crypto_map is not a compound command')
    else:
        # strip everything up to the first 'Crypto Map IPv4 '
        scrape = ""
        tag_found = False
        for line in output.result.split('\n'):
            if not tag_found:
                # if 'Crypto Map IPv4 ' in line:
                if 'Crypto Map ' in line:
                    scrape += f'{line}\n'
                    tag_found = True
            else:
                scrape += f'{line}\n'
        # print(scrape)
        # split document into cryptomap entries
        cryptomaps = []
        crypto_map_found_count = 0
        try:
            if len(scrape) > 0:
                for line in scrape.split('\n'):
                    # this can fail but only on a huge scrape which is hard to see - mep-shared-rri-agg09
                    # if 'Crypto Map IPv4 ' in line:  # will not work on older cisco
                    if 'Crypto Map ' in line:
                        cryptomaps.append(f'{line}\n')
                        crypto_map_found_count += 1
                        print('lookup crypto maps %d\r' % crypto_map_found_count, end="")
                    elif 'Crypto Map: ' in line:
                        # these lines list the isakmp profile for the ipsec cryptomap profile, shorthand output that is not required
                        pass
                    else:
                        cryptomaps[-1] += f'{line.lstrip()}\n'
            print(f'lookup crypto maps {crypto_map_found_count}')
        except Exception as e:
            print(f'failed to process cryptomap scrape: {e}')
            # print(scrape)
            pass
        # process cryptomap scrapes
        n_cores = os.cpu_count()
        partial_function = partial(process_cryptomaps)
        # with ThreadPoolExecutor(max_workers=1) as executor:  # debug
        with ThreadPoolExecutor(max_workers=n_cores) as executor:
            executor.map(partial_function, cryptomaps)
        print(f'cryptomaps processed {crypto_map_count}')
        # write to db
        if len(cryptomap_records) > 0:
            requests = []
            for i in cryptomap_records:
                record = i
                filter = {'peer_ip': record['peer_ip'], 'crypto_map': record['crypto_map']}
                requests.append(UpdateMany(filter, {'$set': record}, upsert=True))
            result = device_table.bulk_write(requests)
            print(result.bulk_api_result)
            return result
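
# Illustrative only: per the scrape 4) header above, crypto map records are merged onto the
# tunnel documents with the compound key 'peer_ip'/'crypto_map'; on RRI aggregators the extra
# 'crypto_map_template' field is what cisco_transform_set() below matches on when it attaches
# the ordered transform set. A hedged read-back sketch; the helper name is an assumption.
def _example_find_cryptomap_for_peer(device_table, peer_ip, crypto_map):
    return device_table.find_one({'peer_ip': peer_ip, 'crypto_map': crypto_map}, {'_id': 0})
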

def cisco_isakmp_policy(collection, command_output, device_record, connection):
    print('\ncisco_isakmp_policy')
    device_name = device_record['DeviceName']
    ip = device_record['IPAddress']
    output = command_output['output']
    if not output == 'error':
        scrape = ""
        tag_found = False
        isakmp_policy = []
        # split scrape by policies
        for line in output.result.split('\n'):
            # print(line)
            if not tag_found:
                if 'Global IKE policy' in line:
                    tag_found = True
            else:
                scrape += f'{line}\n'
        # print(scrape)
        # split policies by suite
        suite = []
        if len(scrape) > 0:
            for line in scrape.split('\n'):
                if 'Protection suite of priority ' in line:
                    suite.append(f'{line}\n')
                else:
                    suite[-1] += f'{line}\n'
        # get suite attributes
        for s in suite:
            suite_dict = {}
            # print(s)
            for line in s.split('\n'):
                # print(line)
                sline = line.lstrip()
                if 'Protection suite of priority' in sline:
                    priority = sline.split(' ')[4]
                    # print(priority)
                    suite_dict.update({'priority': priority})
                if 'encryption algorithm:' in sline:
                    if 'Advanced Encryption Standard' in sline:
                        enc_algo = 'aes'
                    elif 'Three key triple DE' in sline:
                        enc_algo = '3des'
                    elif 'Data Encryption Standard' in sline:
                        enc_algo = 'des'
                    else:
                        enc_algo = 'no_match'
                    if enc_algo != 'no_match':
                        enc_kb = [int(x) for x in sline[sline.find("(")+1:sline.find(")")].split() if x.isdigit()]
                        enc_kb = str(enc_kb[0]) if len(enc_kb) > 0 else ''
                        if len(enc_kb) > 0:
                            enc_algo = enc_algo + '_' + str(enc_kb)
                        # print(enc_algo)
                    suite_dict.update({'enc_algo': enc_algo})
                if 'hash algorithm:' in sline:
                    if 'Secure Hash Standard 2' in sline:
                        hash_algo = 'sha2'
                    elif 'Secure Hash Standard' in sline:
                        hash_algo = 'sha'
                    elif 'Message Digest 5' in sline:
                        hash_algo = 'md5'
                    else:
                        hash_algo = 'no_match'
                    if hash_algo != 'no_match':
                        hash_kb = [int(x) for x in sline[sline.find("(")+1:sline.find(")")].split() if x.isdigit()]
                        hash_kb = str(hash_kb[0]) if len(hash_kb) > 0 else ''
                        if len(hash_kb) > 0:
                            hash_algo = hash_algo + '_' + str(hash_kb)
                        # print(hash_algo)
                    suite_dict.update({'hash_algo': hash_algo})
                if 'authentication method:' in sline:
                    if 'Pre-Shared Key' in sline:
                        auth_type = 'psk'
                    else:
                        auth_type = 'no_match'
                if 'Diffie-Hellman group:' in sline:
                    dh_group = sline.split('Diffie-Hellman group:')[1].split(' ')[0].lstrip().replace('#', '')
                    dh_group_kb = [int(x) for x in sline[sline.find("(")+1:sline.find(")")].split() if x.isdigit()]
                    dh_group_kb = str(dh_group_kb[0]) if len(dh_group_kb) > 0 else ''
                    if len(dh_group_kb) > 0:
                        dh_group = dh_group + '_' + str(dh_group_kb)
                    # print(dh_group)
                    suite_dict.update({'dh_group': dh_group})
            # print(suite_dict)
            isakmp_policy.append(suite_dict)
        # get isakmp policy precedence
        if len(isakmp_policy) > 0:
            # print(isakmp_policy)
            print(f'isakmp policy entry count {len(isakmp_policy)}')
            update = {'isakmp_policy': isakmp_policy}
            # isakmp_policy.append({'priority': '0', 'enc_algo': '3des', 'hash_algo': 'sha', 'dh_group': '14_2048'})  # debug
            # find highest priority isakmp policy enc_algo
            highest_priority_policy = sorted([int(x['priority']) for x in isakmp_policy])[0]
            highest_priority_policy_algo = [x['enc_algo'] for x in isakmp_policy if x['priority'] == str(highest_priority_policy)]
            if 'des' in highest_priority_policy_algo[0]:
                # print(highest_priority_policy_algo[0])
                update.update({'isakmp_policy_default_p1_3des': True})
            else:
                update.update({'isakmp_policy_default_p1_3des': False})
            # update _default table with device isakmp policy result
            # for i in update['isakmp_policy']:
            #     print(i)
            # print(update['isakmp_policy_default_p1_3des'])
            filter = {'DeviceName': device_name}
            result = collection.update_one(filter, {'$set': update}, upsert=True)
            # print(dir(result))
            # print(result.acknowledged)  # need to print the full insert/update/upsert stats
            return result
        else:
            print(f'isakmp policy entry count {len(isakmp_policy)}')

def cisco_transform_set(collection, command_output, device_record, connection):
    print('\ncisco_transform_set')

    def process_transform_set(scrape):
        # print(scrape)
        tfs_found = False
        tfset = []
        for line in scrape.split('\n'):
            sline = line.lstrip()
            if not tfs_found:
                if 'Transform sets=' in sline:
                    tfs_found = True
                    pass
            elif ' } ,' in sline:
                tfs_name = sline.split(' ')[0].split(':')[0]
                tfs_encr_algo = sline.replace('  ', ' ').split(' ')[2]
                tfs_hash_algo = sline.replace('  ', ' ').split(' ')[3]
                tfset.append({'name': tfs_name, 'p2_encr_algo': tfs_encr_algo, 'p2_hash_algo': tfs_hash_algo})
            else:
                tfs_found = False
        return tfset

    ## init vars
    update_src = []
    device_name = device_record['DeviceName']
    device_type = device_record['DeviceType']
    device_table = collection['temp'][device_name]
    requests = []

    ## dmvpn lookup ordered transform set
    # print('dmvpn')
    if device_type in ["IP-VPNHUB", "IP-VCSR-HUB"]:
        tunnel_interfaces = device_table.distinct('p2_interface')
        print(f'tunnel interfaces {tunnel_interfaces}')
        if len(tunnel_interfaces) > 0:
            for t in tunnel_interfaces:
                interface_name = t
                with Scrapli(**connection) as conn:
                    interface = conn.send_command(f'show interface {interface_name}')
                parsed = interface.genie_parse_output()
                # print(json.dumps(parsed, indent=4))
                if 'tunnel_profile' in parsed[interface_name]:
                    ipsec_profile_name = parsed[interface_name]['tunnel_profile']
                elif 'Tunnel protection via IPSec' in interface.result:
                    # some ios genie outputs are not fully parsed, fall back to manual parse
                    for line in interface.result.split('\n'):
                        if 'Tunnel protection via IPSec' in line:
                            ipsec_profile_name = [a for a in line[line.find("(")+1:line.find(")")].split()][1]
                # print(ipsec_profile_name)
                if 'ipsec_profile_name' in locals():
                    with Scrapli(**connection) as conn:
                        ipsec_profile = conn.send_command(f'show crypto ipsec profile {ipsec_profile_name}')
                    # print(ipsec_profile.result)
                    transform_set = process_transform_set(ipsec_profile.result)
                    match_field = 'p2_interface'
                    match_field_value = t
                    update_src.append({'match_field': match_field, 'match_field_value': match_field_value, 'transform_set': transform_set})

    ## rri lookup ordered transform set
    # print('rri')
    if device_type in ["IP-VPNAGG", "IP-P2PAGG"]:
        crypto_map_templates = device_table.distinct('crypto_map_template')
        # print(crypto_map_templates)
        if len(crypto_map_templates) > 0:
            for t in crypto_map_templates:
                with Scrapli(**connection) as conn:
                    crypto_map = conn.send_command(f'show crypto dynamic-map tag {t}')
                # print(crypto_map.result)
                transform_set = process_transform_set(crypto_map.result)
                match_field = 'crypto_map_template'
                match_field_value = t
                update_src.append({'match_field': match_field, 'match_field_value': match_field_value, 'transform_set': transform_set})

    ## build db update requests
    # print(json.dumps(update_src, indent=4))
    if len(update_src) > 0:
        for r in update_src:
            query = {r['match_field']: r['match_field_value']}
            # print(query)
            object_ids = [d for d in device_table.distinct('_id', query)]
            # print(object_ids)
            query = {"_id": {"$in": object_ids}}
            update = {'ordered_transform_set': r['transform_set']}
            requests.append(UpdateMany(query, {'$set': update}, upsert=True))
    # print(requests)

    ## bulk update collection documents with ordered_transform_sets
    if len(requests) > 0:
        dst_result = device_table.bulk_write(requests)
        print(dst_result.bulk_api_result)
        return dst_result
# "$arrayElemAt" "$first" "$slice", "$regex" also does not honour read ahead negative match (?!3des) # https://stackoverflow.com/questions/29664097/what-is-the-syntax-for-mongodb-query-for-boolean-values # https://www.tutorialspoint.com/get-the-first-element-in-an-array-and-return-using-mongodb-aggregate def p2_encr_algo_check(collection, triple_des_match = True): if triple_des_match: #regex_statement = {'$regex': '.*3des.*', '$options': 'i'} regex_statement = re.compile('(?i).*3DES.*') else: regex_statement = {'$not': re.compile('(?i).*3DES.*')} result = collection.aggregate([ {"$match": {"ordered_transform_set": {"$exists": True}}}, {"$match": {'p2_encr_algo': regex_statement }}, {"$project": {"_id": 1}} ]) matched_doc_ids = [d['_id'] for d in result] # print(dumps(matched_doc_ids, indent=4)) return matched_doc_ids def first_ordered_transform_set_check(collection, doc_ids, triple_des_match = True): matched_doc_ids = [] if triple_des_match: regex_statement = re.compile('(?i).*3DES.*') else: regex_statement = {'$not': re.compile('(?i).*3DES.*')} for doc_id in doc_ids: result = collection.aggregate([ {"$match": { "_id" : doc_id }}, {"$unwind": "$ordered_transform_set"}, {"$limit": 1 }, {"$match": {'ordered_transform_set.p2_encr_algo': regex_statement}}, {"$project": {"_id": 1}} ]) for result_id in [d['_id'] for d in result]: matched_doc_ids.append(result_id) # print(dumps(matched_doc_ids, indent=4)) return matched_doc_ids def tdes_requests_builder(requests, doc_ids, p2_default_3des, spoke_p2_default_3des, spoke_p2_algo_preference): if len(doc_ids) >0: update = {} update.update({'p2_default_3des': p2_default_3des}) if spoke_p2_default_3des != 'unset': update.update({'spoke_p2_default_3des': spoke_p2_default_3des}) update.update({'spoke_p2_algo_preference': spoke_p2_algo_preference}) # print(json.dumps(update, indent=4)) query = { "_id" : { "$in" : doc_ids } } requests.append(UpdateMany(query, {'$set': update}, upsert=True)) return requests ## init vars device_name = device_record['DeviceName'] device_table = collection['temp'][device_name] requests = [] #### p2_encr_algo 3des triple_des_match = True tdes_doc_ids = p2_encr_algo_check(device_table, triple_des_match) ## 1st ordered_transform_set 3des triple_des_match = True tdes_tdes_ids = first_ordered_transform_set_check(device_table, tdes_doc_ids, triple_des_match) # p2_default_3des = True / spoke_p2_default_3des = unset / spoke_p2_algo_preference = unknown requests = tdes_requests_builder(requests, tdes_tdes_ids, True, 'unset', 'unknown') ## 1st ordered_transform_set NOT 3des triple_des_match = False tdes_ntdes_ids = first_ordered_transform_set_check(device_table, tdes_doc_ids, triple_des_match) # p2_default_3des False / spoke_p2_default_3des True / spoke_p2_algo_preference = 3des requests = tdes_requests_builder(requests, tdes_ntdes_ids, False, True, '3des') #### p2_encr_algo NOT 3des triple_des_match = False ntdes_doc_ids = p2_encr_algo_check(device_table, triple_des_match) ## 1st ordered_transform_set 3des triple_des_match = True ntdes_tdes_ids = first_ordered_transform_set_check(device_table, ntdes_doc_ids, triple_des_match) # p2_default_3des True / spoke_p2_default_3des False / spoke_p2_algo_preference = not 3des requests = tdes_requests_builder(requests, ntdes_tdes_ids, True, False, 'not 3des') ## 1st ordered_transform_set NOT 3des triple_des_match = False ntdes_ntdes_ids = first_ordered_transform_set_check(device_table, ntdes_doc_ids, triple_des_match) # p2_default_3des False / spoke_p2_default_3des unset / spoke_p2_algo_preference = 
    requests = tdes_requests_builder(requests, ntdes_ntdes_ids, False, 'unset', 'unknown')

    ## bulk update collection documents with the 3des flags
    if len(requests) > 0:
        result = device_table.bulk_write(requests)
        print(result.bulk_api_result)
        return result
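
# Illustrative only: the merged query alluded to at the top of triple_des_check(). On a newer
# MongoDB ($arrayElemAt needs >= 3.2, $regexMatch needs >= 4.2) the per-document loop could
# collapse into one aggregation that classifies every document in a single pass. This is a
# hedged sketch, not something this module runs against its MongoDB 3.0.15 target; the helper
# name and the returned field names are assumptions.
def _example_merged_3des_classification(device_table):
    pipeline = [
        {"$match": {"ordered_transform_set": {"$exists": True}}},
        {"$project": {
            "p2_is_3des": {"$regexMatch": {
                "input": {"$ifNull": ["$p2_encr_algo", ""]},
                "regex": "3des", "options": "i"}},
            "first_ts_is_3des": {"$regexMatch": {
                "input": {"$ifNull": [{"$arrayElemAt": ["$ordered_transform_set.p2_encr_algo", 0]}, ""]},
                "regex": "3des", "options": "i"}},
        }},
    ]
    return list(device_table.aggregate(pipeline))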