Skip to content

Commit

Permalink
Added filepath check
Browse the repository at this point in the history
  • Loading branch information
LoH-lu committed Jul 2, 2024
1 parent 5cd38a4 commit f32bd73
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 19 deletions.
13 changes: 9 additions & 4 deletions netbox_push.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
import csv
import pynetbox
from netbox_connection import connect_to_netbox
import os
from concurrent.futures import ThreadPoolExecutor
import configparser
import pynetbox
from tqdm import tqdm
from netbox_connection import connect_to_netbox

# Get the directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))

def process_row(row, pbar):
"""
Expand Down Expand Up @@ -67,7 +71,8 @@ def write_data_to_netbox(url, token, csv_file):
global netbox
netbox = connect_to_netbox(url, token)

with open(csv_file, 'r') as file:
csv_file_path = os.path.join(script_dir, csv_file)
with open(csv_file_path, 'r') as file:
reader = csv.DictReader(file)
rows = list(reader)

Expand All @@ -81,7 +86,7 @@ def write_data_to_netbox(url, token, csv_file):

# Read URL and token from var.ini
config = configparser.ConfigParser()
config.read('var.ini')
config.read(os.path.join(script_dir, 'var.ini'))
url = config['credentials']['url']
token = config['credentials']['token']

Expand Down
16 changes: 10 additions & 6 deletions nmap_compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
from datetime import datetime
import os

# Get the directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))

def get_file_path(directory, date_time):
"""
Generate a file path based on the directory and date.
Expand All @@ -13,7 +16,7 @@ def get_file_path(directory, date_time):
Returns:
- file_path (str): The full file path based on the directory and date.
"""
return os.path.join(directory, f'nmap_results_{date_time.strftime("%Y-%m-%d_%H-%M-%S")}.csv')
return os.path.join(script_dir, directory, f'nmap_results_{date_time.strftime("%Y-%m-%d_%H-%M-%S")}.csv')

def get_latest_files(directory, num_files=2):
"""
Expand All @@ -26,19 +29,20 @@ def get_latest_files(directory, num_files=2):
Returns:
- files (list): The list of latest CSV file names.
"""
files = [f for f in os.listdir(directory) if f.endswith('.csv')]
files.sort(key=lambda x: os.path.getmtime(os.path.join(directory, x)), reverse=True)
full_directory = os.path.join(script_dir, directory)
files = [f for f in os.listdir(full_directory) if f.endswith('.csv')]
files.sort(key=lambda x: os.path.getmtime(os.path.join(full_directory, x)), reverse=True)
return files[:num_files]

# Directory for result files
directory = 'results/'
directory = 'results'

# Get the two latest file paths
latest_files = get_latest_files(directory)
file_paths = [get_file_path(directory, datetime.strptime(file_name[13:32], "%Y-%m-%d_%H-%M-%S")) for file_name in latest_files]

# Output file path
output_file_path = 'ipam_addresses.csv'
output_file_path = os.path.join(script_dir, 'ipam_addresses.csv')

def read_csv(file_path):
"""
Expand Down Expand Up @@ -67,7 +71,7 @@ def write_csv(data, file_path):
- file_path (str): The path to the output CSV file.
"""
with open(file_path, 'w', newline='') as file:
fieldnames = ['address', 'dns_name', 'status', 'scantime', 'tags', 'tenant', 'VRF'] # Added 'VRF' to fieldnames
fieldnames = ['address', 'dns_name', 'status', 'scantime', 'tags', 'tenant', 'VRF']
writer = csv.DictWriter(file, fieldnames=fieldnames)

# Write header
Expand Down
24 changes: 15 additions & 9 deletions nmap_scan_multi_dns.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
# Lock for writing to CSV file
csv_lock = threading.Lock()

# Get the directory of the current script
script_dir = os.path.dirname(os.path.abspath(__file__))

def read_from_csv(filename):
"""
Expand All @@ -25,7 +27,8 @@ def read_from_csv(filename):
Returns:
- data (list): A list of dictionaries representing rows from the CSV file.
"""
with open(filename, 'r') as file:
filepath = os.path.join(script_dir, filename)
with open(filepath, 'r') as file:
reader = csv.DictReader(file)
data = [row for row in reader]
return data
Expand All @@ -42,8 +45,9 @@ def remove_scanned_prefixes(data, scanned_prefixes):
updated_data = [row for row in data if row['Prefix'] not in scanned_prefixes]

# Rewrite the updated data to the CSV file
with open('ipam_prefixes.csv', 'w', newline='') as file:
fieldnames = ['Prefix', 'VRF', 'Status', 'Tags', 'Tenant'] # Added 'VRF' to fieldnames
filepath = os.path.join(script_dir, 'ipam_prefixes.csv')
with open(filepath, 'w', newline='') as file:
fieldnames = ['Prefix', 'VRF', 'Status', 'Tags', 'Tenant']
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(updated_data)
Expand Down Expand Up @@ -110,12 +114,15 @@ def run_nmap_on_prefixes(data, output_folder):
results = []
scanned_prefixes = []

# Create the full path for the output folder
output_folder_path = os.path.join(script_dir, output_folder)

# Filter rows to scan only those with status 'active' and without the tag 'Disable Automatic Scanning'
rows_to_scan = [row for row in data if row['Status'] == 'active' and 'Disable Automatic Scanning' not in row['Tags']]

script_start_time = datetime.now() # Get the script start time

with ThreadPoolExecutor(max_workers=5) as executor: # Adjust the max_workers parameter based on your system's capabilities
with ThreadPoolExecutor(max_workers=5) as executor:
# Use executor.map to asynchronously run the scans and get results
futures = {executor.submit(run_nmap_on_prefix, row['Prefix'], row['Tenant'], row['VRF']): row for row in rows_to_scan}

Expand All @@ -125,8 +132,7 @@ def run_nmap_on_prefixes(data, output_folder):
with csv_lock:
results.extend(prefix_results)
scanned_prefixes.append(futures[future]['Prefix'])
write_results_to_csv(prefix_results, output_folder, script_start_time) # Pass script start time

write_results_to_csv(prefix_results, output_folder_path, script_start_time)

remove_scanned_prefixes(data, scanned_prefixes)
return results
Expand All @@ -151,8 +157,8 @@ def write_results_to_csv(results, output_folder, script_start_time):
# Check if the file is empty
is_empty = not os.path.exists(output_filename) or os.stat(output_filename).st_size == 0

with open(output_filename, 'a', newline='') as file: # Use 'a' (append) mode to add results to the file
fieldnames = ['address', 'dns_name', 'status', 'tags', 'tenant', 'VRF', 'scantime'] # Added 'VRF' to fieldnames
with open(output_filename, 'a', newline='') as file:
fieldnames = ['address', 'dns_name', 'status', 'tags', 'tenant', 'VRF', 'scantime']
writer = csv.DictWriter(file, fieldnames=fieldnames)

# Add headers if the file is empty
Expand All @@ -165,4 +171,4 @@ def write_results_to_csv(results, output_folder, script_start_time):
if __name__ == "__main__":
data = read_from_csv('ipam_prefixes.csv')
output_folder = 'results'
run_nmap_on_prefixes(data, output_folder)
run_nmap_on_prefixes(data, output_folder)

0 comments on commit f32bd73

Please sign in to comment.