forked from initstring/cloud_enum
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
234 lines (196 loc) · 7.11 KB
/
utils.py
File metadata and controls
234 lines (196 loc) · 7.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
"""
Helper functions for network requests, etc
"""
import time
import sys
import datetime
import re
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
try:
import requests
import dns
import dns.resolver
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from concurrent.futures._base import TimeoutError
except ImportError:
print("[!] Please pip install requirements.txt.")
sys.exit()
LOGFILE = False
def init_logfile(logfile):
    """
    Set up the module-wide log file when the user supplies one.

    Stores the path in the global LOGFILE and stamps the file with a
    session header so separate runs are easy to tell apart.
    """
    if not logfile:
        return
    global LOGFILE
    LOGFILE = logfile
    timestamp = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    with open(logfile, 'a') as log_writer:
        log_writer.write("\n\n#### CLOUD_ENUM {} ####\n".format(timestamp))
def get_url_batch(url_list, use_ssl=False, callback='', threads=5):
    """
    Processes a list of URLs, sending the results back to the calling
    function in real-time via the `callback` parameter

    Args:
        url_list: hostnames (scheme-less) to request.
        use_ssl: prepend 'https://' when True, otherwise 'http://'.
        callback: callable invoked with each completed response object;
            if it returns the string 'breakout', remaining batches are
            abandoned. NOTE(review): the default '' is not callable, so
            callers are expected to always pass a real function — confirm.
        threads: both the batch size and the async worker count.
    """
    # Start a counter for a status message
    tick = {}
    tick['total'] = len(url_list)
    tick['current'] = 0
    # Break the url list into smaller lists based on thread size
    queue = [url_list[x:x+threads] for x in range(0, len(url_list), threads)]
    # Define the protocol
    if use_ssl:
        proto = 'https://'
    else:
        proto = 'http://'
    # Start a requests object
    session = FuturesSession(executor=ThreadPoolExecutor(max_workers=threads))
    # Using the async requests-futures module, work in batches based on
    # the 'queue' list created above. Call each URL, sending the results
    # back to the callback function.
    for batch in queue:
        batch_pending = {}
        batch_results = {}
        # First, grab the pending async request and store it in a dict
        for url in batch:
            batch_pending[url] = session.get(proto + url)
        # Then, grab all the results from the queue.
        # This is where we need to catch exceptions that occur with large
        # fuzz lists and dodgy connections.
        for url in batch_pending:
            try:
                # Timeout is set due to observation of some large jobs simply
                # hanging forever with no exception raised.
                batch_results[url] = batch_pending[url].result(timeout=30)
            except requests.exceptions.ConnectionError:
                # Failed URLs are simply omitted from batch_results, so the
                # callback never sees them.
                print(" [!] Connection error on {}. Investigate if there"
                      " are many of these.".format(url))
            except TimeoutError:
                print(" [!] Timeout on {}. Investigate if there are"
                      " many of these".format(url))
        # Now, send all the results to the callback function for analysis
        # We need a way to stop processing unnecessary brute-forces, so the
        # callback may tell us to bail out.
        for url in batch_results:
            check = callback(batch_results[url])
            if check == 'breakout':
                return
        # Refresh a status message
        tick['current'] += threads
        sys.stdout.flush()
        sys.stdout.write(" {}/{} complete..."
                         .format(tick['current'], tick['total']))
        sys.stdout.write('\r')
    # Clear the status message
    sys.stdout.write(' \r')
def dns_lookup(nameserver, name):
    """
    This function performs the actual DNS lookup when called in a threadpool
    by the fast_dns_lookup function.

    Args:
        nameserver: IP address of the DNS server to query.
        name: hostname to resolve.

    Returns:
        The name (str) if it resolves, otherwise '' (empty string on
        NXDOMAIN and on timeout).
    """
    res = dns.resolver.Resolver()
    res.timeout = 10
    res.nameservers = [nameserver]
    try:
        # NOTE(review): Resolver.query() is deprecated in dnspython 2.x in
        # favour of resolve() — kept here for 1.x compatibility; confirm
        # the pinned dnspython version before switching.
        res.query(name)
        # If no exception is thrown, return the valid name
        return name
    except dns.resolver.NXDOMAIN:
        return ''
    except dns.exception.Timeout:
        print(" [!] DNS Timeout on {}. Investigate if there are many"
              " of these.".format(name))
        # Fix: previously fell through and implicitly returned None.
        # Return '' so the function always yields a string, matching
        # the NXDOMAIN branch (both are falsy, so callers are unaffected).
        return ''
def fast_dns_lookup(names, nameserver, callback='', threads=5):
    """
    Helper function to resolve DNS names. Uses multithreading.

    Args:
        names: list of hostnames to resolve.
        nameserver: IP address of the DNS server to query.
        callback: optional callable invoked with each valid name as it
            is found, for real-time reporting.
        threads: thread-pool size (also the batch size).

    Returns:
        List of names that resolved successfully.
    """
    total = len(names)
    current = 0
    valid_names = []
    print("[*] Brute-forcing a list of {} possible DNS names".format(total))
    # Break the url list into smaller lists based on thread size
    queue = [names[x:x+threads] for x in range(0, len(names), threads)]
    # Fix: build the pool and the partial once, instead of constructing a
    # fresh ThreadPool on every batch iteration (the old pools were closed
    # but never joined). Because pool.map takes only a single function arg,
    # the partial binds the same nameserver for every lookup.
    pool = ThreadPool(threads)
    dns_lookup_params = partial(dns_lookup, nameserver)
    try:
        for batch in queue:
            results = pool.map(dns_lookup_params, batch)
            # We should now have the batch of results back, process them.
            for name in results:
                if name:
                    if callback:
                        callback(name)
                    valid_names.append(name)
            current += threads
            # Update the status message
            sys.stdout.flush()
            sys.stdout.write(" {}/{} complete...".format(current, total))
            sys.stdout.write('\r')
    finally:
        pool.close()
        pool.join()
    # Clear the status message
    sys.stdout.write(' \r')
    return valid_names
def list_bucket_contents(bucket):
    """
    Provides a list of full URLs to each open bucket

    Args:
        bucket: URL of an open storage bucket whose XML listing will be
            fetched and printed key-by-key via printc().
    """
    # Matches both S3-style (<Key>) and Azure-style (<Name>) listings
    key_regex = re.compile(r'<(?:Key|Name)>(.*?)</(?:Key|Name)>')
    # Fix: requests has no default timeout, so the old bare get() could
    # hang forever on a dodgy endpoint. 30s matches the futures timeout
    # used by get_url_batch().
    reply = requests.get(bucket, timeout=30)
    # Make a list of all the relative-path key name
    keys = re.findall(key_regex, reply.text)
    # Need to remove URL parameters before appending file names
    # from Azure buckets
    sub_regex = re.compile(r'(\?.*)')
    bucket = sub_regex.sub('', bucket)
    # Format them to full URLs and print to console
    if keys:
        printc(" FILES:\n", 'none')
        for key in keys:
            url = bucket + key
            printc(" ->{}\n".format(url), 'none')
    else:
        printc(" ...empty bucket, so sad. :(\n", 'none')
def printc(text, color):
    """
    Prints colored text to screen

    Known colors: 'green', 'orange', 'red' (bold + ANSI color),
    'black' (bold only), 'none' (plain). Anything else prints nothing.
    All text is also appended to LOGFILE when logging is enabled.
    """
    # ANSI escape sequences
    bold = '\033[1m'
    end = '\033[0m'
    ansi = {
        'green': '\033[92m',
        'orange': '\033[33m',
        'red': '\033[31m',
    }
    if color in ansi:
        sys.stdout.write(bold + ansi[color] + text + end)
    elif color == 'black':
        sys.stdout.write(bold + text + end)
    elif color == 'none':
        sys.stdout.write(text)
    if LOGFILE:
        with open(LOGFILE, 'a') as log_writer:
            log_writer.write(text.lstrip())
def start_timer():
    """
    Starts a timer for functions in main module

    Returns the current epoch time, to be passed to stop_timer() later.
    """
    return time.time()
def stop_timer(start_time):
    """
    Stops timer and prints a status

    Args:
        start_time: epoch time previously returned by start_timer().
    """
    # Compute elapsed seconds and render as HH:MM:SS
    elapsed = time.time() - start_time
    stamp = time.strftime("%H:%M:%S", time.gmtime(elapsed))
    print("")
    print(" Elapsed time: {}".format(stamp))
    print("")