crtsh-list.py
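Queries crt.sh for certificate transparency results on a given domain and prints the common names and matching identities found, with optional grep, word filtering, and file output.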
from colorama import Fore, Style
from bs4 import BeautifulSoup
import requests
import argparse
import os
# Require a "domain" argument from the user; the remaining arguments are optional
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain", required=True, help="Provide the name of the domain you want to search for.")
parser.add_argument("-o", "--output", required=False, help="Provide a name for the output file if you want to save the output.")
parser.add_argument("-g", "--grep", required=False, help="Grep the output to only return results that include the given string.")
parser.add_argument("-fs", "--filters", required=False, help="Filter specific words out of the output, such as 'www.' or '*.'. You can filter multiple words by separating them with a comma (example: -fs *.,www).")
args = parser.parse_args()
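# Example invocations (illustrative):
#   python crtsh-list.py -d example.com
#   python crtsh-list.py -d example.com -g example.com -fs *.,www. -o results.txt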
# Append the user-specified domain to the crt.sh search URL
url = f"https://crt.sh/?q={args.domain}"
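# Note: crt.sh can also return JSON (https://crt.sh/?q=<domain>&output=json),
# which would avoid HTML scraping entirely; this script parses the HTML results page instead.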
# Check whether the provided output file already exists before making the request
if args.output:
    if os.path.isfile(args.output):
        print(Fore.RED + f"The file '{Fore.WHITE + args.output + Style.RESET_ALL + Fore.RED}' already exists, quitting." + Style.RESET_ALL)
        quit()
# Try the GET request; if it fails, display an error message and quit
try:
    # A timeout keeps a hung connection from blocking the script forever
    response = requests.get(url, timeout=30)
except requests.exceptions.RequestException:
    print(Fore.RED + f"Could not make a request to: {Fore.WHITE + url + Style.RESET_ALL}")
    quit()
# Helper functions, one per combination of the optional arguments
def output_grep_filter():
    # Save and print results that contain the grep string and none of the filtered words
    filters_list = [word.strip() for word in args.filters.split(",")]
    matches = [site for site in result_list if all(word not in site for word in filters_list) and args.grep in site]
    with open(args.output, "x") as file:
        file.write("\n".join(matches))
    for site in matches:
        print(site)
def output_grep():
    # Save and print results that contain the grep string
    matches = [site for site in result_list if args.grep in site]
    with open(args.output, "x") as file:
        file.write("\n".join(matches))
    for site in matches:
        print(site)
def output_filter():
    # Save and print results that contain none of the filtered words
    filters_list = [word.strip() for word in args.filters.split(",")]
    matches = [site for site in result_list if all(word not in site for word in filters_list)]
    with open(args.output, "x") as file:
        file.write("\n".join(matches))
    for site in matches:
        print(site)
def grep_filter():
    # Print results that contain the grep string and none of the filtered words
    filters_list = [word.strip() for word in args.filters.split(",")]
    for site in result_list:
        if all(word not in site for word in filters_list) and args.grep in site:
            print(site)
def output():
    # Save and print every result
    with open(args.output, "x") as file:
        file.write("\n".join(result_list))
    for site in result_list:
        print(site)
def grep():
    # Print results that contain the grep string
    for site in result_list:
        if args.grep in site:
            print(site)
def filter_results():
    # Print results that contain none of the filtered words
    filters_list = [word.strip() for word in args.filters.split(",")]
    for site in result_list:
        if all(word not in site for word in filters_list):
            print(site)
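# Note: the seven helpers above could be collapsed into a single function along
# these lines (an illustrative sketch; the dispatch below still uses the helpers):
def process_results(results, grep=None, filters=None, output=None):
    # Keep only results that pass the (optional) filter and grep checks
    matches = [site for site in results
               if (not filters or all(word not in site for word in filters))
               and (not grep or grep in site)]
    # Optionally save the matches, then print them
    if output:
        with open(output, "x") as file:
            file.write("\n".join(matches))
    for site in matches:
        print(site)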
# If the response code is 200, grab all the common names and matching identities from the results table
if response.status_code == 200:
    soup = BeautifulSoup(response.text, "html.parser")
    # The 5th column of the crt.sh results table holds the common name, the 6th the matching identities
    common_name_elements = soup.select("table tr td:nth-of-type(5)")
    common_names = [element.text.strip() for element in common_name_elements]
    matching_identities_elements = soup.select("table tr td:nth-of-type(6)")
    matching_identities = [text.strip() for element in matching_identities_elements for text in element.stripped_strings]
    # Combine the two lists and remove any duplicates
    combined_list = common_names + matching_identities
    result_list = set(combined_list)
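    # Note: sets are unordered, so results print in an arbitrary order each run;
    # result_list = sorted(set(combined_list)) would keep the dedup and add a stable order.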
    # If all three optional arguments are used
    if args.output and args.grep and args.filters:
        output_grep_filter()
    # If only the output and grep arguments are used
    elif args.output and args.grep:
        output_grep()
    # If only the output and filter arguments are used
    elif args.output and args.filters:
        output_filter()
    # If only the grep and filter arguments are used
    elif args.grep and args.filters:
        grep_filter()
    # If only the output argument is used
    elif args.output:
        output()
    # If only the grep argument is used
    elif args.grep:
        grep()
    # If only the filter argument is used
    elif args.filters:
        filter_results()
    # If no optional arguments are used, print everything
    else:
        for site in result_list:
            print(site)
# If the response code is anything other than 200, display an error message
else:
    print(Fore.RED + f"Received an unexpected response from: {Fore.WHITE + url + Style.RESET_ALL}")
    quit()