@@ -26,7 +26,7 @@ def whois_gather(short_domain):
         logging.info('WHOIS INFO GATHERING: OK')
         w = whois.whois(short_domain)
         if w.org is None:
-            w['org'] = 'n/a'
+            w['org'] = 'Organization name was not extracted'
         logging.info('WHOIS INFO GATHERING: OK')
         return w
     except Exception as e:
@@ -110,7 +110,7 @@ def sm_gather(url):
     links = [a['href'] for a in soup.find_all('a', href=True)]
     categorized_links = {'Facebook': [], 'Twitter': [], 'Instagram': [],
                          'Telegram': [], 'TikTok': [], 'LinkedIn': [],
-                         'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': []}
+                         'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': [], 'X.com': []}

     for link in links:
         parsed_url = urlparse(link)
@@ -135,6 +135,8 @@ def sm_gather(url):
             categorized_links['WeChat'].append(urllib.parse.unquote(link))
         elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
             categorized_links['Odnoklassniki'].append(urllib.parse.unquote(link))
+        elif hostname and (hostname == 'x.com' or hostname.endswith('.x.com')):
+            categorized_links['X.com'].append(urllib.parse.unquote(link))

     if not categorized_links['Odnoklassniki']:
         categorized_links['Odnoklassniki'].append('Odnoklassniki links were not found')
@@ -156,6 +158,8 @@ def sm_gather(url):
         categorized_links['Twitter'].append('Twitter links were not found')
     if not categorized_links['Facebook']:
         categorized_links['Facebook'].append('Facebook links were not found')
+    if not categorized_links['X.com']:
+        categorized_links['X.com'].append('X.com links were not found')

     return categorized_links

@@ -209,7 +213,7 @@ def domains_reverse_research(subdomains, report_file_type):
     subdomain_socials_grouped = list(dict(subdomain_socials_grouped).values())

     sd_socials = {'Facebook': [], 'Twitter': [], 'Instagram': [], 'Telegram': [], 'TikTok': [], 'LinkedIn': [],
-                  'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': []}
+                  'VKontakte': [], 'YouTube': [], 'Odnoklassniki': [], 'WeChat': [], 'X.com': []}

     for inner_list in subdomain_socials_grouped:
         for link in inner_list:
@@ -234,6 +238,8 @@ def domains_reverse_research(subdomains, report_file_type):
                     sd_socials['WeChat'].append(urllib.parse.unquote(link))
                 elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
                     sd_socials['Odnoklassniki'].append(urllib.parse.unquote(link))
+                elif hostname and (hostname == 'x.com' or hostname.endswith('.x.com')):
+                    sd_socials['X.com'].append(urllib.parse.unquote(link))

     sd_socials = {k: list(set(v)) for k, v in sd_socials.items()}

@@ -242,7 +248,7 @@ def domains_reverse_research(subdomains, report_file_type):
     if not subdomain_ip:
         subdomain_ip = ["No subdomains IP's were found"]

-    if report_file_type == 'pdf' or report_file_type == 'html':
+    if report_file_type == 'html':
         return subdomain_mails, sd_socials, subdomain_ip
     elif report_file_type == 'xlsx':
         return subdomain_urls, subdomain_mails, subdomain_ip, sd_socials
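
For context, below is a minimal, runnable sketch of the hostname-based bucketing this commit extends. The categorize_links helper and the sample links are illustrative assumptions for this sketch only, not part of the codebase:

# Illustrative sketch (assumed helper name `categorize_links`, assumed sample
# links; not part of this commit) of how the new 'X.com' bucket behaves next
# to an existing hostname check such as the Odnoklassniki one.
import urllib.parse
from urllib.parse import urlparse

def categorize_links(links):
    categorized = {'Odnoklassniki': [], 'X.com': []}
    for link in links:
        hostname = urlparse(link).hostname
        # Match the bare domain or any of its subdomains.
        if hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')):
            categorized['Odnoklassniki'].append(urllib.parse.unquote(link))
        elif hostname and (hostname == 'x.com' or hostname.endswith('.x.com')):
            categorized['X.com'].append(urllib.parse.unquote(link))
    # Mirror the report-friendly placeholder used when a bucket stays empty.
    for key, bucket in categorized.items():
        if not bucket:
            bucket.append(f'{key} links were not found')
    return categorized

print(categorize_links(['https://x.com/someuser', 'https://example.com/page']))
# {'Odnoklassniki': ['Odnoklassniki links were not found'],
#  'X.com': ['https://x.com/someuser']}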