Commit 154f8fe1 authored by Your Name

Simple spider

parent 50bda366

#!/usr/bin/python3
import urllib.request, urllib.error, urllib.parse
import argparse
from pprint import pprint
from bs4 import BeautifulSoup

# Download a page and record it as visited; skip URLs already seen so the
# same page is never fetched twice (and the crawl cannot loop forever).
def dl_page(url, visited_links_list):
    if url in visited_links_list:
        return ""
    print("Dl: " + url)
    visited_links_list.append(url)
    try:
        response = urllib.request.urlopen(url)
    except urllib.error.URLError:
        return ""
    return response.read()

def display_start_info():
    print("Scanning from: " + args.url)

# Collect every link on the page that stays within the starting site:
# site-relative paths, or absolute URLs under the start URL.
def extract_all_links(html_page, links):
    if html_page:
        soup = BeautifulSoup(html_page, 'html.parser')
        for a in soup.find_all('a', href=True):
            if a['href'].startswith('/'):
                add_uniq_links(links, transform_relative_into_abs_links(args.url, a['href']))
            elif a['href'].startswith(args.url):
                add_uniq_links(links, a['href'])

# Turn a site-relative path into an absolute URL; urljoin avoids the doubled
# slash that plain concatenation produces when baseurl ends in '/'.
def transform_relative_into_abs_links(baseurl, relative_path):
    return urllib.parse.urljoin(baseurl, relative_path)

# Queue a link only if it is not already in the frontier.
def add_uniq_links(source, link):
    if link not in source:
        pprint("link: " + str(link))
        source.append(link)

def print_result(visited_page):
    print("==================================")
    print("Nb links visited: " + str(len(visited_page)))
    visited_page.sort()
    for l in visited_page:
        print(l)

if __name__ == "__main__":
    visited_page = []
    to_visit = []
    parser = argparse.ArgumentParser(description='Simple spider written in Python 3 by AGS')
    parser.add_argument('url', help='URL to start spidering from')
    args = parser.parse_args()
    display_start_info()
    to_visit.append(args.url)
    # Pop URLs from the frontier until it is empty; dl_page skips anything
    # already visited, so the crawl terminates on a finite site.
    while len(to_visit) > 0:
        page = dl_page(to_visit.pop(), visited_page)
        extract_all_links(page, to_visit)
    print_result(visited_page)
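
# Example invocation, assuming the script is saved as spider.py and made
# executable; http://example.com/ stands in for a real start URL, and <n>
# for the number of pages actually reached:
#
#   $ ./spider.py http://example.com/
#   Scanning from: http://example.com/
#   Dl: http://example.com/
#   ...
#   ==================================
#   Nb links visited: <n>
#
# Only site-relative hrefs and absolute links starting with the given URL
# are queued, so the spider never leaves the starting site.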