Update linkfinder
Rohitash465 authored Jun 3, 2024
1 parent ffb572e commit de70954
Showing 1 changed file with 211 additions and 46 deletions.
257 changes: 211 additions & 46 deletions linkfinder
@@ -2,19 +2,23 @@ import os, sys, time, socket, threading, argparse, requests, subprocess
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin

def get_all_urls(url, text):
try:
soup = BeautifulSoup(text, 'html.parser')
base_url = urlparse(url).scheme + "://" + urlparse(url).netloc
urls = set()
for link in soup.find_all('a', href=True):
href = link.get('href')
full_url = urljoin(base_url, href)
urls.add(full_url)
return urls
except requests.exceptions.RequestException as e:
print("Error:", e)
return set()
def get_all_urls(data):
data_list = data.split()
urls = []
for data in data_list:
if "http://" in data or "https://" in data:
ur = data.split("'")
for urr in ur:
ur2 = urr.split("\"")
for urrr in ur2:
ur3 = urrr.split("(")
for urrrr in ur3:
ur4 = urrrr.split(")")
for dd in ur4:
if "http://" in dd or "https://" in dd:
if dd not in urls:
urls.append(dd)
return urls
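
# Illustrative only (not part of the commit): the rewritten get_all_urls() now
# works on raw response text instead of parsed HTML, splitting on whitespace and
# then on quote and parenthesis characters to pull out anything containing
# http:// or https://. A minimal sketch with a hypothetical input string:
#
#   sample = 'var a="https://example.com/app.js";fetch("http://example.com/api")'
#   print(get_all_urls(sample))
#   # ['https://example.com/app.js', 'http://example.com/api']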

def internet_connection():
try:
@@ -37,6 +41,18 @@ def home_logo():
IHA089: Navigating the Digital Realm with Code and Security - Where Programming Insights Meet Cyber Vigilance.
""")

def About():
print("""Welcome to IHA089, your premier source for cutting-edge cybersecurity solutions. At IHA089, we specialize in developing tools designed to enhance the security and integrity of your digital environment.

We understand the importance of reliable and efficient cybersecurity solutions, which is why we focus on creating tools that are not only powerful but also user-friendly. Our tools are designed to streamline security processes, making it easier for organizations to protect their assets and maintain a secure operational framework.
""")
print("Website ::: https://iha089.org.in")
print("Github ::: https://github.com/IHA089")
print("Instagram ::: https://www.instagram.com/IHA089")
print("Telegram ::: https://t.me/IHATron")
print("youtube ::: https://youtube.com/@iha089")
print("Twiter ::: https://twitter.com/iha089")

def split_url_list(input_list, chunk_size):
return [input_list[i:i+chunk_size] for i in range(0, len(input_list), chunk_size)]
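
# Illustrative only (not part of the commit): split_url_list() chunks a URL list
# so each worker thread gets at most chunk_size URLs. For example:
#
#   split_url_list(['a', 'b', 'c', 'd', 'e'], 2)
#   # -> [['a', 'b'], ['c', 'd'], ['e']]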

@@ -116,33 +132,58 @@ def create_thread(url_set, timeout, num_threads, output_file, enable_tor_proxy,
for thread in threads:
thread.join()

def beautify_js(js_code):
try:
import jsbeautifier
options = jsbeautifier.default_options()
options.indent_size = 4
beautified_js = jsbeautifier.beautify(js_code, options)
return beautified_js
except ImportError:
print("Please install jsbeautifier python module")
sys.exit()
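
# Illustrative only (not part of the commit): beautify_js() relies on the
# jsbeautifier package; the output formatting shown below is approximate.
#
#   print(beautify_js('function f(a){if(a){return a*2}return 0}'))
#   # function f(a) {
#   #     if (a) {
#   #         return a * 2
#   #     }
#   #     return 0
#   # }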

def Main():
parser = argparse.ArgumentParser(description='linkfinder')
parser.add_argument('-about', '--about', action='store_true', help='About IHA089')
parser.add_argument('-u', '--url', type=str, help='target URL')
parser.add_argument('-r', '--response_status', action='store_true', help='show the response status of each URL')
parser.add_argument('-t', '--timeout', type=int, help='set timeout in seconds (default 5)', default=5)
parser.add_argument('-i', '--input', type=str, help='check the status of URLs listed in an input file')
parser.add_argument('-o', '--output', type=str, help='write output to a file')
parser.add_argument('-n', '--num_threads', type=int, help='number of threads for concurrent requests', default=5)
parser.add_argument('-all', '--get_all_url', action='store_true', help='extract all URLs (including images, videos, fonts, CSS)')
parser.add_argument('-filter', '--filter_data', type=str, help='filter specific URLs (e.g. .js, author, admin)')
parser.add_argument('-tor', '--enable_tor_proxy', action='store_true', help='use a Tor proxy for privacy')
parser.add_argument('-proxy_ip', '--proxy_ip', type=str, help='set proxy IP')
parser.add_argument('-proxy_port', '--proxy_port', type=int, help='set proxy PORT')
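
# Hypothetical invocations (illustrative, not part of the commit), assuming the
# script is saved as linkfinder:
#
#   python linkfinder -u https://example.com -filter .js -o js_links.txt
#   python linkfinder -i urls.txt -r -n 10 -t 15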

args = parser.parse_args()
about = args.about
url = args.url
check_response_status = args.response_status
timeout = args.timeout
input_file = args.input
output_file = args.output
num_threads = args.num_threads
get_all_url = args.get_all_url
filter_data = args.filter_data
enable_tor_proxy = args.enable_tor_proxy
proxy_ip = args.proxy_ip
proxy_port = args.proxy_port


im_data = ['.png', '.PNG', '.webp', '.jpg', '.JPG', '.JPEG', '.jpeg', '.avif', '.mp3', '.mp4', '.gif', '.GIF']

if about:
home_logo()
About()
sys.exit()

if not internet_connection():
print("No internet connection.")
sys.exit()

home_logo()

if enable_tor_proxy:
@@ -175,40 +216,124 @@ def Main():
req = requests.get(url)

if req.status_code==200:
print("Scanning all url of ::: {}".format(url))
all_url = get_all_urls(url, req.text)
if check_response_status:
create_thread(all_url, timeout, num_threads, output_file, enable_tor_proxy, proxy_ip, proxy_port)
else:
if ".js" in url:
data = req.text
b_js = beautify_js(data)
if output_file:
with open(output_file, "a") as writer:
for url in all_url:
writer.write(url+"\n")
writer.close()
writer.write(b_js)
else:
for url in all_url:
print(url)
else:
url = "http://"+url
if enable_tor_proxy:
req = requests.get(url, proxies=create_proxy())
print(b_js)
else:
req = requests.get(url)

if req.status_code==200:
print("Scanning all url of ::: {}".format(url))
all_url = get_all_urls(url, req.text)
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
all_url = get_all_urls(req.text)
if check_response_status:
create_thread(all_url, timeout, num_threads, output_file, enable_tor_proxy, proxy_ip, proxy_port)
else:
if output_file:
with open(output_file, "a") as writer:
for url in all_url:
writer.write(url+"\n")
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
writer.write(dd+"\n")
writer.close()
else:
for url in all_url:
print(url)
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
print(dd)
else:
url = "http://"+url
if enable_tor_proxy:
req = requests.get(url, proxies=create_proxy())
else:
req = requests.get(url)

if req.status_code==200:
if ".js" in url:
data = req.text
b_js = beautify_js(data)
if output_file:
with open(output_file, "a") as writer:
writer.write(b_js)
else:
print(b_js)
else:
print("Scanning all url of ::: {}".format(url))
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
all_url = get_all_urls(req.text)
if check_response_status:
create_thread(all_url, timeout, num_threads, output_file, enable_tor_proxy, proxy_ip, proxy_port)
else:
if output_file:
with open(output_file, "a") as writer:
for url in all_url:
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
writer.write(dd+"\n")
writer.close()
else:
for url in all_url:
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
print(dd)
else:
print("Failed to fetch data of {}".format(url))
print("response code ::: {}".format(req.status_code))
@@ -221,19 +346,61 @@ def Main():
req = requests.get(url)

if req.status_code==200:
print("Scanning all url of ::: {}".format(url))
all_url = get_all_urls(url, req.text)
if check_response_status:
create_thread(all_url, timeout, num_threads, output_file, enable_tor_proxy, proxy_ip, proxy_port)
else:
if ".js" in url:
data = req.text
b_js = beautify_js(data)
if output_file:
with open(output_file, "a") as writer:
for url in all_url:
writer.write(url+"\n")
writer.close()
writer.write(b_js)
else:
for url in all_url:
print(url)
print(b_js)
else:
print("Scanning all url of ::: {}".format(url))
print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
all_url = get_all_urls(req.text)
if check_response_status:
create_thread(all_url, timeout, num_threads, output_file, enable_tor_proxy, proxy_ip, proxy_port)
else:
if output_file:
with open(output_file, "a") as writer:
for url in all_url:
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
writer.write(dd+"\n")
writer.close()
else:
for url in all_url:
if filter_data:
if filter_data in url:
dd = url
else:
dd=""
else:
if get_all_url:
dd = url
else:
for im in im_data:
if im in url:
dd=""
break
else:
dd = url
if dd:
print(dd)
else:
print("Failed to fetch data")
print("Response code ::: {}".format(req.status_code))
@@ -242,5 +409,3 @@ def Main():

if __name__ == "__main__":
Main()

