#!/usr/bin/env python
# AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically for S3
# buckets and also has some extra features that allow you to grep for
# delicious files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# by Jordan Potti
# @ok_bye_now
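#
# Example invocation (illustrative only; 'hostnames.txt' and 'keywords.txt' are
# placeholder filenames, the flags are the ones defined in main() below):
#   python AWSBucketDump.py -l hostnames.txt -g keywords.txt -t 5 -D -d 1 -m 500000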
from argparse import ArgumentParser
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
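
# Shared state: work queues consumed by the worker threads, the optional grep
# wordlist, and the parsed command-line arguments (populated in main()).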
bucket_q = Queue()
download_q = Queue()
grep_list = None
arguments = None
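
# Request a bucket URL: a 403/404 means the bucket is not publicly listable,
# while a 200 whose body contains "Content" is a listable bucket and is handed
# to status200() for key extraction.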
def fetch(url):
    print('Fetching ' + url + '...')
    response = requests.get(url)
    if response.status_code in (403, 404):
        status403(url)
    if response.status_code == 200:
        if "Content" in response.text:
            status200(response, grep_list, url)

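# Daemon thread loop: pull bucket URLs off bucket_q and probe each one,
# logging (but not dying on) any exception so one bad host cannot stall the run.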
def bucket_worker():
    while True:
        item = bucket_q.get()
        try:
            fetch(item)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            print(e)
        bucket_q.task_done()

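# Daemon thread loop: pull collectable object URLs off download_q and fetch
# them to disk via downloadFile().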
def downloadWorker():
    print('Download worker running...')
    while True:
        item = download_q.get()
        try:
            downloadFile(item)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            print(e)
        download_q.task_done()

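# Serialise directory creation so concurrent download workers do not race on
# os.makedirs() for the same bucket path.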
directory_lock = Lock()

def get_directory_lock():
    directory_lock.acquire()

def release_directory_lock():
    directory_lock.release()

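# Map an object URL to a local path under arguments.savedir (-d), creating any
# missing intermediate directories under the directory lock, and return the
# full filename to write to.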
def get_make_directory_return_filename_path(url):
    global arguments
    bits = url.split('/')
    directory = arguments.savedir
    for i in range(2, len(bits) - 1):
        directory = os.path.join(directory, bits[i])
    try:
        get_directory_lock()
        if not os.path.isdir(directory):
            os.makedirs(directory)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        print(e)
    finally:
        release_directory_lock()
    return os.path.join(directory, bits[-1]).rstrip()

interesting_file_lock = Lock()

def get_interesting_file_lock():
    interesting_file_lock.acquire()

def release_interesting_file_lock():
    interesting_file_lock.release()

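# Append a collectable URL to interesting_file.txt, serialised behind a lock
# since multiple worker threads write to the same file.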
def write_interesting_file(filepath):
    try:
        get_interesting_file_lock()
        with open('interesting_file.txt', 'ab+') as interesting_file:
            interesting_file.write(filepath.encode('utf-8'))
            interesting_file.write('\n'.encode('utf-8'))
    finally:
        release_interesting_file_lock()

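# Stream a single object to disk. Keys ending in '/' are directory placeholders
# and are skipped, as are objects whose reported Content-Length exceeds the
# -m (maxsize) limit.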
def downloadFile(filename):
    global arguments
    print('Downloading {}'.format(filename) + '...')
    local_path = get_make_directory_return_filename_path(filename)
    local_filename = (filename.split('/')[-1]).rstrip()
    print('local {}'.format(local_path))
    if local_filename == "":
        print("Directory..\n")
    else:
        r = requests.get(filename.rstrip(), stream=True)
        if 'Content-Length' in r.headers:
            if int(r.headers['Content-Length']) > arguments.maxsize:
                print("This file is greater than the specified max size... skipping...\n")
            else:
                with open(local_path, 'wb') as f:
                    shutil.copyfileobj(r.raw, f)
        r.close()

def print_banner():
    print('''\nDescription:
AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically for S3
buckets and also has some extra features that allow you to grep for
delicious files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Jordan Potti
@ok_bye_now''')

def cleanUp():
    print("Cleaning up files...")

def status403(line):
    print(line.rstrip() + " is not accessible.")

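# Record a collectable object: queue it for the download workers and append it
# to interesting_file.txt.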
def queue_up_download(filepath):
    download_q.put(filepath)
    print('Collectable: {}'.format(filepath))
    write_interesting_file(filepath)

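# Parse a listable bucket's ListBucketResult XML with xmltodict, collect the
# object keys, and queue each one for download: every key when no grep wordlist
# was given, otherwise only keys matching a wordlist entry.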
def status200(response, grep_list, line):
    print("Pilfering " + line.rstrip() + '...')
    objects = xmltodict.parse(response.text)
    Keys = []
    try:
        contents = objects['ListBucketResult']['Contents']
        if not isinstance(contents, list):
            contents = [contents]
        for child in contents:
            Keys.append(child['Key'])
    except KeyError:
        pass
    for words in Keys:
        words = (str(words)).rstrip()
        collectable = line + '/' + words
        if grep_list is not None and len(grep_list) > 0:
            for grep_line in grep_list:
                grep_line = (str(grep_line)).rstrip()
                if grep_line in words:
                    queue_up_download(collectable)
                    break
        else:
            queue_up_download(collectable)

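# Parse arguments, spin up the bucket and download worker threads, queue every
# hostname from the -l list as http://<name>.s3.amazonaws.com, and wait for
# both queues to drain.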
def main():
    global arguments
    global grep_list
    parser = ArgumentParser()
    parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant disk space.")
    parser.add_argument("-d", dest="savedir", required=False, default='', help="If -D, then -d 1 to create save directories for each bucket with results.")
    parser.add_argument("-l", dest="hostlist", required=True, help="File containing the bucket names to check, one per line.")
    parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for.")
    parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size (in bytes) to download.")
    parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="Number of threads.")

    if len(sys.argv) == 1:
        print_banner()
        # parser.error() prints the usage message and exits, so nothing else is needed here.
        parser.error("No arguments given.")

    # output parsed arguments into a usable object
    arguments = parser.parse_args()

    # specify primary variables
    if arguments.grepwords:
        with open(arguments.grepwords, "r") as grep_file:
            grep_content = grep_file.readlines()
            grep_list = [g.strip() for g in grep_content]

    if arguments.download and arguments.savedir:
        print("Downloads enabled (-D), save directories (-d) for each host will be created/used.")
    elif arguments.download and not arguments.savedir:
        print("Downloads enabled (-D), will be saved to current directory.")
    else:
        print("Downloads were not enabled (-D), not saving results locally.")

    # start up bucket workers
    for _ in range(0, arguments.threads):
        print('Starting thread...')
        t = Thread(target=bucket_worker)
        t.daemon = True
        t.start()

    # start download workers
    for _ in range(0, arguments.threads):
        t = Thread(target=downloadWorker)
        t.daemon = True
        t.start()

    with open(arguments.hostlist) as f:
        for line in f:
            bucket = 'http://' + line.rstrip() + '.s3.amazonaws.com'
            print('Queuing {}'.format(bucket) + '...')
            bucket_q.put(bucket)

    bucket_q.join()
    if arguments.download:
        download_q.join()
    cleanUp()

if __name__ == "__main__":
    main()