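"""Fuzzix.py - command line tool to spider and fuzz a target web host."""
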
import argparse
import sys

from Fuzzix.Data import Host, URL, Settings, Dir
from Fuzzix.Util import WebApi, Content_Worker, Content, TERMINATE_WORKER
from Fuzzix import Logger, print_banner


class URL_Fuzzer:
    """class to perform spidering and fuzzing tasks"""

    def __init__(self, host):
        if isinstance(host, Host):
            self.host = host
        else:
            raise ValueError("wrong type for attribute host!")
        Logger.info("fuzzing url", self.host.getURL())
    @staticmethod
    def __spiderworker__(content):
        """
        the function called by Util.Content_Worker to spider the given URL
        attribute content: the content to process
        return: a processed Content object
        """
        rootURL = content.getURL()
        newContent = Content(rootURL)
        newContent.setContentType(content.getContentType())
        newContent.setStatus(content.getStatus())
        if content.getStatus() in URL.GOOD_STATUS:
            if 'text' in content.getContentType() or 'script' in content.getContentType():
                refs = WebApi.grabRefs(content.getContent())
                urls = []
                for ref in refs:
                    try:
                        # try to prettify the URL; skip refs that cannot be resolved
                        url = URL.prettifyURL(rootURL, ref)
                        urls.append(url)
                    except ValueError:
                        continue
                # return the extracted URLs as a link list
                newContent.setContentType("linklist")
                newContent.setContent(urls)
        return newContent
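
    # Note on the crawl strategy below: spider() works breadth-first. Each
    # recursion level drains the toProceed buffer into the shared worker queue,
    # waits for the workers via queue.join(), then reads the processed link
    # lists from the done queue to seed the next level.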
    def spider(self):
        """
        spider-routine of the URL_Fuzzer
        return: None
        """
        Logger.info("Spidering URL", self.host.getURL())
        toProceed = []  # buffer for open tasks
        doneURLs = []   # deadlock protection: URLs that were already queued
        # start on the website root
        rootcontent = Content(self.host.getURL())
        rootcontent.setProcessor(URL_Fuzzer.__spiderworker__)
        toProceed.append(rootcontent)
        for i in range(0, Settings.readAttribute("recursion_depth", 0)):
            # move the buffered tasks into the worker queue
            while toProceed:
                content = toProceed.pop()
                Content_Worker.queue.put(content)
            # wait for the workers to finish
            Logger.info('Processing recursion', i, Content_Worker.queue.qsize(), 'task(s) to be done')
            Content_Worker.queue.join()
            # process the finished results
            Logger.info(Content_Worker.done.qsize(), "result(s) to analyze")
            while not Content_Worker.done.empty():
                content = Content_Worker.done.get()
                Content_Worker.done.task_done()
                if content.getContentType() != "linklist":
                    continue
                urls = content.getContent()
                for url in urls:
                    path = url.getPath()
                    if self.host.isExternal(url) or url.getURL() in doneURLs or len(path) == 0:
                        continue
                    doneURLs.append(url.getURL())
                    length = content.getSize()
                    self.host.getRootdir().appendPath(path, length)
                    newContent = Content(url)
                    newContent.setProcessor(URL_Fuzzer.__spiderworker__)
                    toProceed.append(newContent)
        # print the resulting directory tree
        print(self.host.getRootdir())
        Logger.info("spidering completed")

    def fuzz(self):
        """
        fuzz-routine of the URL_Fuzzer (currently only logs start and completion)
        return: None
        """
        Logger.info("fuzzing URL", self.host.getURL())
        Logger.info("fuzzing completed")


def startup():
    """
    initializes the program, writes all startup options to the Settings object
    return: None
    """
    # parse the command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('url', metavar='url', help="the victim's address", type=str, nargs='+')
    parser.add_argument('-t', '--threads', help="the number of threads to use", type=int, default=64)
    parser.add_argument('-r', '--recursion', help="the maximum recursion depth", type=int, default=5)
    parser.add_argument('-s', '--spider', action="store_true", help="spider the given website")
    parser.add_argument('-f', '--fuzz', action="store_true", help="fuzz the given url to discover hidden files")
    parser.add_argument('--verifyCert', action="store_true", help="verify the host's certificate")
    opts = parser.parse_args()
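
    # Example invocation (illustrative only; the target host is a placeholder):
    #   python Fuzzix.py -t 32 -r 3 --spider --fuzz http://example.com/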

    # read the parsed arguments
    host_url = opts.url[0]
    spider = opts.spider
    fuzz = opts.fuzz
    verify_cert = opts.verifyCert
    threads = opts.threads
    recursion_depth = opts.recursion

    # check whether the given URL is valid, then write all attributes to Settings
    if not URL.isValidURL(host_url):
        raise ValueError(host_url + " is not a valid URL")
    Settings.writeAttribute("host_url", host_url)
    Settings.writeAttribute("spider", spider)
    Settings.writeAttribute("fuzz", fuzz)
    Settings.writeAttribute("verify_cert", verify_cert)
    Settings.writeAttribute("threads", threads)
    Settings.writeAttribute("recursion_depth", recursion_depth)
    Settings.readConfig("config/config.ini")
    WebApi.setProtocol(URL(Settings.readAttribute("host_url", "")).getProto())


def startWorkers(amount=4):
    """
    starts the workers
    attribute amount: the number of workers to start
    return: None
    """
    Logger.info("Starting " + str(amount) + " threads")
    for i in range(0, amount):
        c = Content_Worker()
        c.start()
        Content_Worker.workers.append(c)
    Logger.info("Threads started")


def stopWorkers():
    """
    stops the running workers
    return: None
    """
    Logger.info("stopping workers")
    # enqueue one termination sentinel per worker, then wait for every thread to exit
    for _ in Content_Worker.workers:
        Content_Worker.queue.put(TERMINATE_WORKER)
    for w in Content_Worker.workers:
        w.join()
    Logger.info("stopped workers")


def shutdown():
    """
    cleans up and stops the program
    return: None
    """
    try:
        stopWorkers()
    except Exception:
        Logger.error("failed to stop threads!")
    Logger.info("finished scan")
    sys.exit()


if __name__ == "__main__":
    print_banner()
    try:
        startup()
        startWorkers(Settings.readAttribute("threads", 0))
    except ValueError as e:
        Logger.error(e)
        sys.exit()
    targetHost = Host(
        URL(Settings.readAttribute("host_url", "")),
        Dir(Settings.readAttribute("root_dir", ""), None)
    )
    urlFuzzer = URL_Fuzzer(targetHost)
    if Settings.readAttribute("spider", False):
        urlFuzzer.spider()
    if Settings.readAttribute("fuzz", False):
        urlFuzzer.fuzz()
    shutdown()