-
Notifications
You must be signed in to change notification settings - Fork 295
Expand file tree
/
Copy pathutils.py
More file actions
346 lines (286 loc) · 10.8 KB
/
utils.py
File metadata and controls
346 lines (286 loc) · 10.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
"""
Helper functions for network requests, etc
"""
import time
import sys
import datetime
import re
import csv
import json
import ipaddress
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from urllib.parse import urlparse
try:
import requests
import dns
import dns.resolver
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from concurrent.futures._base import TimeoutError
except ImportError:
print("[!] Please pip install requirements.txt.")
sys.exit()
LOGFILE = False
LOGFILE_FMT = ''
def init_logfile(logfile, fmt):
    """
    Configure the module-level logfile if the user asked for one.

    Writes a timestamped session header to the file so repeated runs
    appended to the same log are easy to tell apart.
    """
    if not logfile:
        return
    global LOGFILE, LOGFILE_FMT
    LOGFILE = logfile
    LOGFILE_FMT = fmt
    stamp = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    with open(logfile, 'a', encoding='utf-8') as log_writer:
        log_writer.write(f"\n\n#### CLOUD_ENUM {stamp} ####\n")
def is_valid_domain(domain):
    """
    Validate a domain name against DNS length rules.

    A full name may be at most 253 characters, and every dot-separated
    label must be between 1 and 63 characters long.
    """
    if len(domain) > 253:
        return False
    return all(1 <= len(label) <= 63 for label in domain.split('.'))
def get_url_batch(url_list, use_ssl=False, callback='', threads=5, redir=True):
    """
    Process a list of URLs asynchronously, handing each HTTP response to
    `callback` in real time.

    Args:
        url_list: hostnames to request (scheme is prepended here)
        use_ssl: use https:// instead of http://
        callback: function invoked with each response object; returning
            the string 'breakout' aborts all remaining batches
        threads: number of concurrent requests per batch
        redir: whether to follow HTTP redirects
    """
    # Filter invalid hostnames FIRST so the progress denominator only
    # counts requests that will actually be made (previously the total
    # was taken before filtering and could never be reached)
    url_list = [url for url in url_list if is_valid_domain(url)]

    # Counter for the status message
    tick = {'total': len(url_list), 'current': 0}

    # Break the url list into smaller lists based on thread size
    queue = [url_list[x:x+threads] for x in range(0, len(url_list), threads)]

    # Define the protocol
    proto = 'https://' if use_ssl else 'http://'

    # Using the async requests-futures module, work in batches based on
    # the 'queue' list created above. Call each URL, sending the results
    # back to the callback function.
    for batch in queue:
        # The session is deliberately created per-batch: a single shared
        # session showed pool-cleanup errors on large jobs. Unresolved
        # upstream discussion:
        # https://github.com/ross/requests-futures/issues/20
        session = FuturesSession(
            executor=ThreadPoolExecutor(max_workers=threads+5))
        try:
            batch_pending = {}
            batch_results = {}

            # First, fire off every request and hold its future
            for url in batch:
                batch_pending[url] = session.get(proto + url,
                                                 allow_redirects=redir)

            # Then collect the results, catching the exceptions that
            # occur with large fuzz lists and dodgy connections
            for url, future in batch_pending.items():
                try:
                    # Timeout is set due to observation of some large jobs
                    # simply hanging forever with no exception raised.
                    batch_results[url] = future.result(timeout=30)
                except requests.exceptions.ConnectionError as error_msg:
                    print(f" [!] Connection error on {url}:")
                    print(error_msg)
                except TimeoutError:
                    print(f" [!] Timeout on {url}. Investigate if there are"
                          " many of these")

            # Send all the results to the callback for analysis; it may
            # tell us to stop processing unnecessary brute-forces
            for url in batch_results:
                if callback(batch_results[url]) == 'breakout':
                    return
        finally:
            # Release the per-batch session and its executor (previously
            # never closed, leaking a thread pool every batch)
            session.close()

        # Refresh the status message
        tick['current'] += threads
        sys.stdout.flush()
        sys.stdout.write(f" {tick['current']}/{tick['total']} complete...")
        sys.stdout.write('\r')

    # Clear the status message
    sys.stdout.write(' \r')
def read_nameservers(file_path):
    """
    Reads nameservers from a given file.
    Each line in the file should contain one nameserver IP address.
    Lines starting with '#' will be ignored as comments.
    """
    try:
        entries = []
        with open(file_path, 'r') as ns_file:
            for raw_line in ns_file:
                candidate = raw_line.strip()
                # Skip blanks and full-line comments
                if candidate and not raw_line.startswith('#'):
                    entries.append(candidate)
        if not entries:
            raise ValueError("Nameserver file is empty or only contains comments")
        return entries
    except FileNotFoundError:
        print(f"Error: File '{file_path}' not found.")
        exit(1)
    except ValueError as e:
        print(e)
        exit(1)
def is_valid_ip(address):
    """
    Return True when `address` parses as a valid IPv4 or IPv6 address.
    """
    try:
        ipaddress.ip_address(address)
    except ValueError:
        return False
    return True
def dns_lookup(nameserver, name):
    """
    Perform a single DNS lookup; called in a threadpool by
    fast_dns_lookup.

    Args:
        nameserver: resolver IP address, or a path to a file listing
            resolver IPs (detected by failing the IP validity check)
        name: the DNS name to resolve

    Returns:
        `name` when it resolves, '' when it does not, or the sentinel
        '-#BREAKOUT_DNS_ERROR#-' on a fatal resolver problem.
    """
    nameserverfile = False
    if not is_valid_ip(nameserver):
        # Not an IP, so treat the argument as a nameserver list file
        nameserverfile = nameserver

    res = dns.resolver.Resolver()
    res.timeout = 3
    if nameserverfile:
        res.nameservers = read_nameservers(nameserverfile)
    else:
        res.nameservers = [nameserver]

    # dnspython >= 2.0 renamed Resolver.query() to resolve() and
    # deprecated the old name, so prefer resolve() when it exists
    do_lookup = res.resolve if hasattr(res, 'resolve') else res.query

    tries = 0
    while tries < 3:
        try:
            do_lookup(name)
            # If no exception is thrown, return the valid name
            return name
        except dns.resolver.NXDOMAIN:
            return ''
        except dns.resolver.NoNameservers as exc_text:
            print(" [!] Error querying nameservers! This could be a problem.")
            print(" [!] If you're using a VPN, try setting --ns to your VPN's nameserver.")
            print(" [!] Bailing because you need to fix this")
            print(" [!] More Info:")
            print(exc_text)
            return '-#BREAKOUT_DNS_ERROR#-'
        except dns.exception.Timeout:
            # Retry on timeout, up to three attempts total
            tries += 1

    # Only reached after three consecutive timeouts
    print(f" [!] DNS lookup for {name} timed out after 3 tries. Investigate if there are many of these.")
    return ''
def fast_dns_lookup(names, nameserver, nameserverfile, callback='', threads=5):
    """
    Resolve a list of DNS names using a threadpool.

    Args:
        names: candidate DNS names to brute-force
        nameserver: resolver IP used when no nameserver file is given
        nameserverfile: optional path to a file of resolver IPs
        callback: optional function invoked with each resolving name
        threads: threadpool size (also the batch size)

    Returns:
        List of names that successfully resolved.
    """
    # Filter invalid names FIRST so both the banner and the progress
    # denominator reflect the lookups actually performed (previously the
    # total was taken before filtering and could never be reached)
    names = [name for name in names if is_valid_domain(name)]
    total = len(names)
    current = 0
    valid_names = []

    print(f"[*] Brute-forcing a list of {total} possible DNS names")

    # Break the name list into smaller lists based on thread size
    queue = [names[x:x+threads] for x in range(0, len(names), threads)]

    for batch in queue:
        pool = ThreadPool(threads)
        try:
            # pool.map takes a single-argument function, so bind the
            # nameserver (or nameserver file) with a partial; each
            # iteration then uses the same resolver source
            if nameserverfile:
                lookup_fn = partial(dns_lookup, nameserverfile)
            else:
                lookup_fn = partial(dns_lookup, nameserver)
            results = pool.map(lookup_fn, batch)

            # We should now have the batch of results back, process them.
            for name in results:
                if name:
                    # A fatal resolver error aborts the whole run
                    if name == '-#BREAKOUT_DNS_ERROR#-':
                        sys.exit()
                    if callback:
                        callback(name)
                    valid_names.append(name)

            current += threads

            # Update the status message
            sys.stdout.flush()
            sys.stdout.write(f" {current}/{total} complete...")
            sys.stdout.write('\r')
        finally:
            # Previously the pool was closed but never joined; reap the
            # worker threads before starting the next batch
            pool.close()
            pool.join()

    # Clear the status message
    sys.stdout.write(' \r')

    return valid_names
def list_bucket_contents(bucket):
    """
    Print the full URL of every key in an open storage bucket.

    Args:
        bucket: URL of the bucket's listing endpoint
    """
    # Both <Key> (AWS/GCP listings) and <Name> (Azure listings) wrap the
    # relative key names
    key_regex = re.compile(r'<(?:Key|Name)>(.*?)</(?:Key|Name)>')

    # Timeout guards against listings that hang forever, matching the
    # 30-second ceiling used elsewhere in this module (previously this
    # request had no timeout at all)
    reply = requests.get(bucket, timeout=30)

    # Make a list of all the relative-path key names
    keys = re.findall(key_regex, reply.text)

    # Need to remove URL parameters before appending file names
    # from Azure buckets
    bucket = re.sub(r'(\?.*)', '', bucket)

    # Format them to full URLs and print to console
    if keys:
        print(" FILES:")
        for key in keys:
            url = bucket + key
            print(f" ->{url}")
    else:
        print(" ...empty bucket, so sad. :(")
def fmt_output(data):
    """
    Print a finding to the console (color-coded by access level) and
    append it to the global logfile, if one is configured.

    Args:
        data: dict with at least 'access', 'msg', and 'target' keys
    """
    # ANSI escape sequences are set based on accessibility of target
    # (basically, how public it is)
    bold = '\033[1m'
    end = '\033[0m'
    if data['access'] == 'public':
        ansi = bold + '\033[92m'  # green
    elif data['access'] == 'protected':
        ansi = bold + '\033[33m'  # orange
    elif data['access'] == 'disabled':
        ansi = bold + '\033[31m'  # red
    else:
        # Unknown access level: print bold/uncolored instead of crashing
        # (previously `ansi` was left undefined here, raising NameError)
        ansi = bold

    sys.stdout.write(' ' + ansi + data['msg'] + ': ' + data['target'] + end + '\n')

    if LOGFILE:
        with open(LOGFILE, 'a', encoding='utf-8') as log_writer:
            if LOGFILE_FMT == 'text':
                log_writer.write(f'{data["msg"]}: {data["target"]}\n')
            if LOGFILE_FMT == 'csv':
                # NOTE: no header row is written, preserving the original
                # append-only CSV behavior
                writer = csv.DictWriter(log_writer, data.keys())
                writer.writerow(data)
            if LOGFILE_FMT == 'json':
                log_writer.write(json.dumps(data) + '\n')
def get_brute(brute_file, mini=1, maxi=63, banned='[^a-z0-9_-]'):
    """
    Generate a deduplicated list of brute-force words from a wordlist
    file, constrained by length and allowed characters.

    Args:
        brute_file: path to the wordlist file
        mini: minimum cleaned-word length to keep
        maxi: maximum cleaned-word length to keep
        banned: regex character class of characters to strip

    Returns:
        Cleaned, unique, lowercase names in file order.
    """
    # Read the brute force file into memory
    with open(brute_file, encoding="utf8", errors="ignore") as infile:
        names = infile.read().splitlines()

    # Clean up the names to be usable for containers
    banned_chars = re.compile(banned)
    clean_names = []
    seen = set()  # O(1) dedup instead of the previous O(n) list scan
    for name in names:
        name = banned_chars.sub('', name.lower())
        if mini <= len(name) <= maxi and name not in seen:
            seen.add(name)
            clean_names.append(name)
    return clean_names
def start_timer():
    """
    Return a wall-clock timestamp marking the start of a timed run,
    for later use by stop_timer in the main module.
    """
    return time.time()
def stop_timer(start_time):
    """
    Print the elapsed time since `start_time`, formatted as HH:MM:SS.
    """
    elapsed = time.time() - start_time
    pretty = time.strftime("%H:%M:%S", time.gmtime(elapsed))
    # Report the statistics, padded with blank lines
    print("")
    print(f" Elapsed time: {pretty}")
    print("")