Commit 691be107 authored by prx

multithread. Beginning.

parent bf66ea67
@@ -12,6 +12,7 @@ except:
 from .i18nlib import _
 from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import as_completed


 def do_req_get(url, ret, keys=None):
     """
@@ -245,7 +246,8 @@ class TorrentSearch:
             elif self.searchengine == 'isohunt':
                 s = [res for res in self._isohunt(sch)]
             elif self.searchengine == 'torrent9':
-                s = [res for res in self._torrent9(sch)]
+                for res in self._torrent9(sch):
+                    yield res
             elif self.searchengine == 'alphareign':
                 s = [res for res in self._alphareign(sch, page)]
             elif self.searchengine == 'btdb':
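Because a `yield` now appears inside this `search` method, Python treats the whole method as a generator, so the branches that still build a list and `return s` no longer hand results back to the caller. A minimal sketch of a generator-only dispatch; `fake_engine_a`, `fake_engine_b`, `ENGINES` and the standalone `search` below are illustrative stand-ins, not this module's API:

    def fake_engine_a(query):
        # stand-in for a scraper method such as self._torrent9(sch)
        yield {'name': query + ' result A', 'seeds': '12'}

    def fake_engine_b(query):
        yield {'name': query + ' result B', 'seeds': '3'}

    ENGINES = {'torrent9': fake_engine_a, 'isohunt': fake_engine_b}

    def search(engine_name, query):
        try:
            # every branch delegates with yield from, so callers always iterate
            yield from ENGINES[engine_name](query)
        except Exception as exc:
            print(exc)

    for result in search('torrent9', 'vikings vostfr'):
        print(result)

With every branch yielding, the caller shown at the bottom of this commit works the same way for any engine.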
@@ -261,28 +263,17 @@ class TorrentSearch:
         except Exception as e:
             print(e)
             s = []
         return s

     def _search_all(self, sch):
         s = []
         searchengine_list = [self._piratebay, self._kickasstorrents, self._isohunt, self._torrent9, self._alphareign, self._btdb, self._digbt, self._p2psearch]
-        if sys.version_info >= (3, 0):
-            with ThreadPoolExecutor(max_workers=5) as e:
-                for searchengine in searchengine_list:
-                    try :
-                        e.map(s.append, searchengine(sch))
-                    except:pass
-        else:
-            s += [res for res in self._piratebay(sch)]
-            s += [res for res in self._kickasstorrents(sch)]
-            s += [res for res in self._isohunt(sch)]
-            s += [res for res in self._torrent9(sch)]
-            s += [res for res in self._alphareign(sch)]
-            s += [res for res in self._btdb(sch)]
-            s += [res for res in self._digbt(sch)]
-            s += [res for res in self._p2psearch(sch)]
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            for searchengine in searchengine_list:
+                try:
+                    executor.map(s.append, searchengine(sch))
+                except Exception as err:
+                    print(err)
         return s

     def _btdigg(self, sch, page=0):
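`Executor.map` collects its input iterables up front in the calling thread, so in `_search_all` above the engine generator `searchengine(sch)`, and with it the HTTP requests, still runs serially; only the `s.append` calls are handed to the worker threads. A minimal sketch of an alternative that pushes each engine's whole search into the pool; `search_all` and its parameters are illustrative, not the committed code:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def search_all(engines, query):
        # engines: a list of generator functions, e.g. [self._piratebay, self._torrent9, ...]
        results = []
        with ThreadPoolExecutor(max_workers=5) as pool:
            # list() drains each engine generator inside a worker thread,
            # so the network requests of the different trackers overlap.
            futures = [pool.submit(list, engine(query)) for engine in engines]
            for future in as_completed(futures):
                try:
                    results.extend(future.result())
                except Exception as exc:
                    # one failing tracker should not abort the others
                    print(exc)
        return results

A future's exception only surfaces when `result()` is called, which is why the `try` sits inside the collection loop rather than around `submit`.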
@@ -498,13 +489,9 @@ class TorrentSearch:
         seeds_results = soup.find_all("td", class_="sy")
         size_results = soup.find_all("td", class_="size-row")
-        if sys.version_info > (3, 0):
-            with ThreadPoolExecutor(max_workers=5) as executor:
-                for res in executor.map(isohunt_parse_results, zip(row, seeds_results, size_results)):
-                    yield res
-        else:
-            for z in zip(row, seeds_results, size_results):
-                yield(isohunt_parse_results(z))
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            for res in executor.map(isohunt_parse_results, zip(row, seeds_results, size_results)):
+                yield res

     def _torrent9(self, sch, page=0):
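In the `_isohunt` hunk above, `executor.map` submits one `isohunt_parse_results` call per zipped row and yields the parsed results in input order, regardless of which worker finishes first, so the name, seeds and size columns stay aligned with their row. A toy sketch of that ordering guarantee; the `parse` function and sample data are illustrative, not isohunt markup:

    from concurrent.futures import ThreadPoolExecutor

    def parse(triple):
        row, seeds, size = triple
        return {'name': row, 'seeds': seeds, 'size': size}

    rows = ['Vikings S01E01', 'Vikings S01E02', 'Vikings S01E03']
    seeds = ['120', '45', '7']
    sizes = ['700 MB', '1.4 GB', '350 MB']

    with ThreadPoolExecutor(max_workers=5) as executor:
        for res in executor.map(parse, zip(rows, seeds, sizes)):
            print(res)   # printed in the same order as the input rows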
@@ -522,28 +509,20 @@ class TorrentSearch:
         s = do_req_get(url, "text")
         soup = bs4.BeautifulSoup(s, "html.parser")
+        tmplist = []
         name = str()
         magnet = str()
         size = str()
         seeds = str()
         leechs = str()
-        for i in soup.find_all('tbody'):
-            for d in i.find_all('td'):
-                if d.find('a'):
-                    href = d.find('a')['href']
-                    # the title is inside <span> tags...
-                    name = [ s.string for s in d.a.find_all('span') ]
-                    name = ' '.join(name)
-                    # get magnet link
-                    ms = do_req_get("{}/{}".format(baseurl,href), "text")
-                    magnetsoup = bs4.BeautifulSoup(ms, "html.parser")
-                    for m in magnetsoup.find_all('a', class_='download'):
-                        if m['href'].startswith('magnet'):
-                            magnet = m['href']
-                            break
-                else:
-                    if not size:
-                        size = d.string
+        toparse = soup.find('tbody')
+        for d in toparse.find_all('td'):
+            if d.find('a'):
+                magnet = d.find('a')['href']
+                # the title is inside <span> tags...
+                name = [ s.string for s in d.a.find_all('span') ]
+                name = ' '.join(name)
+            else:
+                if not size:
+                    size = d.string
@@ -553,15 +532,29 @@ class TorrentSearch:
                         leechs = d.string
             if name and magnet and size and seeds and leechs :
                 res = {'name': name, 'magnet': magnet, 'size': size, 'seeds': seeds, "leechs": leechs}
+                tmplist.append(res)
                 name = str()
                 magnet = str()
                 size = str()
                 seeds = str()
                 leechs = str()
-                yield res
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            future_mgt = { executor.submit(do_req_get, "{}/{}".format(baseurl,res['magnet']), "text") : res for res in tmplist }
+            for f in as_completed(future_mgt):
+                item = future_mgt[f]
+                ms = f.result()
+                magnetsoup = bs4.BeautifulSoup(ms, "html.parser")
+                for m in magnetsoup.find_all('a', class_='download'):
+                    if m['href'].startswith('magnet'):
+                        magnet = m['href']
+                        break
+                item['magnet'] = magnet
+                yield item

     def _alphareign(self, sch, page=0):
         """
@@ -699,7 +692,6 @@ class TorrentSearch:
         size : size
         seed : number of seeds
         """
-
         page = int(page) + 1
         if page < 10:
             page = page * 10
@@ -739,11 +731,6 @@ class TorrentSearch:
             yield res


 def isohunt_parse_results(z):
     """
     parse isohunt results
@@ -771,12 +758,11 @@ def isohunt_parse_results(z):
 if __name__ == '__main__':
-    with TorrentSearch('p2psearch') as ts:
-        s = ts.search('vikings vostfr', 0)
-        #s = omdb_sch("vikings")
-        print(s)
+    with TorrentSearch('torrent9') as ts:
+        for result in ts.search('vikings vostfr', 0):
+            print(result)