Merge branch 'main' of github.com:elfhosted/plex_debrid
funkypenguin committed Jul 23, 2024 · 2 parents 617993a + d59a66f · commit 9676555
Showing 7 changed files with 152 additions and 7 deletions.
9 changes: 9 additions & 0 deletions README.md
@@ -492,6 +492,15 @@ If github is not your cup of tea;
 > - You can find a full list of all possible parameters and their respective values at "https://panel.orionoid.com/" in the "Developers" menu, section "API Docs" under "Stream API".
 >
 ></details>
+>
+><details>
+> <summary><b><u>zilean:</u></b></summary>
+>
+> - Zilean is a service that lets you search [DebridMediaManager](https://github.com/debridmediamanager/debrid-media-manager)-sourced content without needing the *arr apps.
+> - You can integrate zilean into plex_debrid by navigating to '/Settings/Scraper/Sources/Edit/Add source/zilean'.
+> - Details of the project can be found at https://github.com/iPromKnight/zilean
+>
+></details>
### :arrow_down_small: Debrid Services:

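For a concrete sense of the API behind the new README entry above, here is a minimal sketch of the DMM search endpoint that the zilean scraper (added in scraper/services/zilean.py below) calls, assuming a local zilean instance at the scraper's default base URL; the field names mirror how the scraper reads the response.

```python
import urllib.parse

import requests

# Assumed local zilean instance; the scraper's default is http://localhost:8181.
base_url = "http://localhost:8181"

# Build the same kind of request that zilean.py issues for a season search.
params = "query=" + urllib.parse.quote("the mandalorian") + "&season=3"
response = requests.get(base_url + "/dmm/filtered?" + params, timeout=10)
response.raise_for_status()

# The scraper reads rawTitle, infoHash and size from each result.
for result in response.json():
    print(result["rawTitle"], result["infoHash"], result["size"])
```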
12 changes: 7 additions & 5 deletions content/classes.py
@@ -926,17 +926,19 @@ def watch(self):
                     retries = int(float(trigger[2]))
                     if retries == 0:
                         return
+
+                    message = 'retrying download in ' + str(
+                        round(int(ui_settings.loop_interval_seconds) / 60)) + 'min for item: ' + self.query() + ' - version/s [' + '],['.join(
+                        names) + ']'
                     if not self in media.ignore_queue:
                         self.ignored_count = 1
                         media.ignore_queue += [self]
-                        ui_print('retrying download in 30min for item: ' + self.query() + ' - version/s [' + '],['.join(
-                            names) + '] - attempt ' + str(self.ignored_count) + '/' + str(retries))
+                        ui_print(message + ' - attempt ' + str(self.ignored_count) + '/' + str(retries))
                     else:
                         match = next((x for x in media.ignore_queue if self == x), None)
                         if match.ignored_count < retries:
                             match.ignored_count += 1
-                            ui_print('retrying download in 30min for item: ' + self.query() + ' - version/s [' + '],['.join(
-                                names) + '] - attempt ' + str(match.ignored_count) + '/' + str(retries))
+                            ui_print(message + ' - attempt ' + str(match.ignored_count) + '/' + str(retries))
                         else:
                             media.ignore_queue.remove(match)
                             ignore.add(self)
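The message rewrite above derives the printed retry delay from the new loop_interval_seconds setting instead of the hardcoded '30min', so the log stays accurate if the loop interval is changed. A quick sketch of the arithmetic with the shipped default of 1800 seconds:

```python
loop_interval_seconds = 1800  # default added in ui/ui_settings.py below

# round(1800 / 60) == 30, so a default install still prints "30min".
minutes = round(int(loop_interval_seconds) / 60)
print('retrying download in ' + str(minutes) + 'min for item: ...')
```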
@@ -1366,7 +1368,7 @@ def download(self, retries=0, library=[], parentReleases=[]):
         if len(self.Episodes) > 2:
             if self.season_pack(scraped_releases):
                 debrid_downloaded, retry = self.debrid_download()
-                # if scraper.traditional() or debrid_downloaded:
+                if scraper.traditional() or debrid_downloaded:
                     for episode in self.Episodes:
                         episode.skip_scraping = True
         # If there was nothing downloaded, scrape specifically for this season
3 changes: 2 additions & 1 deletion scraper/services/__init__.py
@@ -7,10 +7,11 @@
 from scraper.services import orionoid
 from scraper.services import nyaa
 from scraper.services import torrentio
+from scraper.services import zilean

 #define subclass method
 def __subclasses__():
-    return [rarbg,x1337,jackett,prowlarr,orionoid,nyaa,torrentio]
+    return [rarbg,x1337,jackett,prowlarr,orionoid,nyaa,torrentio,zilean]

 active = ['torrentio']
 overwrite = []
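The module-level __subclasses__() function acts as a simple registry of scraper modules, so wiring in zilean takes only the import and the extra list entry above. A hedged sketch of how a caller could fan out over the registry (the actual dispatch loop lives elsewhere in plex_debrid, so this call site is an assumption):

```python
import scraper.services as services

# Query every registered source that the user has activated.
results = []
for source in services.__subclasses__():
    if source.name in services.active:
        results += source.scrape("The.Mandalorian.S03", "(.*)")
print(str(len(results)) + ' releases scraped')
```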
130 changes: 130 additions & 0 deletions scraper/services/zilean.py
@@ -0,0 +1,130 @@
# import modules
from base import *
from ui.ui_print import *
import urllib.parse
import releases
import re

base_url = "http://localhost:8181"
name = "zilean"
timeout_sec = 10
session = requests.Session()


def setup(cls, new=False):
    from settings import settings_list
    from scraper.services import active
    settings = []
    for category, allsettings in settings_list:
        for setting in allsettings:
            if setting.cls == cls:
                settings += [setting]
    if settings == []:
        if not cls.name in active:
            active += [cls.name]
    back = False
    if not new:
        while not back:
            print("0) Back")
            indices = []
            for index, setting in enumerate(settings):
                print(str(index + 1) + ') ' + setting.name)
                indices += [str(index + 1)]
            print()
            if settings == []:
                print("Nothing to edit!")
                print()
                time.sleep(3)
                return
            choice = input("Choose an action: ")
            if choice in indices:
                settings[int(choice) - 1].input()
                if not cls.name in active:
                    active += [cls.name]
                back = True
            elif choice == '0':
                back = True
    else:
        print()
        indices = []
        for setting in settings:
            if setting.name == "Zilean Base URL":
                setting.setup()
        if not cls.name in active:
            active += [cls.name]


def scrape(query, altquery):
    from scraper.services import active
    ui_print("[zilean] searching for " + query + " accepting titles that regex match " + altquery)
    global base_url
    scraped_releases = []
    if not 'zilean' in active:
        return scraped_releases

    matches_regex = altquery
    if altquery == "(.*)":
        matches_regex = query
    media_type = "show" if regex.search(r'(S[0-9]|complete|S\?[0-9])', matches_regex, regex.I) else "movie"

    opts = []
    title = query
    if media_type == "show":
        s = (regex.search(r'(?<=S)([0-9]+)', matches_regex, regex.I).group()
             if regex.search(r'(?<=S)([0-9]+)', matches_regex, regex.I) else None)
        e = (regex.search(r'(?<=E)([0-9]+)', matches_regex, regex.I).group()
             if regex.search(r'(?<=E)([0-9]+)', matches_regex, regex.I) else None)
        if s is not None and int(s) != 0:
            opts.append('season=' + str(int(s)))
        if e is not None and int(e) != 0:
            opts.append('episode=' + str(int(e)))
        title = re.sub(r'S[0-9]+', '', title, flags=re.IGNORECASE).strip()
        title = re.sub(r'E[0-9]+', '', title, flags=re.IGNORECASE).strip()
    else:
        # find year match at the end of the query string
        year_regex = regex.search(r'(.*)\.([12][0-9]{3})$', query, regex.I)
        if year_regex:
            opts.append('year=' + year_regex.group(2))
            title = year_regex.group(1)

    title = title.replace('.', ' ').replace('?', ' ').strip()
    opts.append('query=' + urllib.parse.quote(title))

    if base_url.endswith('/'):
        base_url = base_url[:-1]
    search_url = base_url + "/dmm/filtered?" + '&'.join(opts)

    try:
        ui_print("[zilean] using search URL: " + search_url)
        response = session.get(search_url, timeout=timeout_sec)

        if not response.status_code == 200:
            ui_print('[zilean] error ' + str(
                response.status_code) + ': failed response from zilean. ' + response.content.decode(errors='replace'))
            return []

    except requests.exceptions.Timeout:
        ui_print('[zilean] error: zilean request timed out.')
        return []
    except:
        ui_print(
            '[zilean] error: zilean couldn\'t be reached. Make sure your zilean base url [' + base_url + '] is correctly formatted.')
        return []

    try:
        response = json.loads(response.content, object_hook=lambda d: SimpleNamespace(**d))
    except:
        ui_print('[zilean] error: unable to parse response: ' + response.content.decode(errors='replace'))
        return []

    ui_print('[zilean] ' + str(len(response)) + ' results found.')
    for result in response[:]:
        if regex.match(r'(' + altquery + ')', result.rawTitle, regex.I):
            links = ['magnet:?xt=urn:btih:' + result.infoHash + '&dn=&tr=']
            seeders = 0  # not available
            scraped_releases += [releases.release(
                '[zilean]', 'torrent', result.rawTitle, [], float(result.size) / 1000000000, links, seeders)]
        else:
            ui_print('[zilean] skipping ' + result.rawTitle + ' because it does not match deviation ' + altquery)

    return scraped_releases
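A minimal usage sketch for the new module, assuming a reachable zilean instance at base_url; by the convention the scrapers share, an altquery of '(.*)' accepts any returned title:

```python
from scraper.services import active, zilean

# The source must be active for scrape() to return anything.
if 'zilean' not in active:
    active.append('zilean')

# The S03 token switches the scraper into show mode and becomes season=3.
found = zilean.scrape('The.Mandalorian.S03', '(.*)')
print(str(len(found)) + ' matching releases')
```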
2 changes: 2 additions & 0 deletions settings/__init__.py
@@ -380,6 +380,7 @@ def get(self):
         setting('Nyaa sleep time', 'Enter a time in seconds to sleep between requests (default: "5"): ',scraper.services.nyaa, 'sleep', hidden=True),
         setting('Nyaa proxy', 'Enter a proxy to use for nyaa (default: "nyaa.si"): ',scraper.services.nyaa, 'proxy', hidden=True),
         setting('Torrentio Scraper Parameters','Please enter a valid torrentio manifest url: ',scraper.services.torrentio, 'default_opts', entry="parameter", help='This setting lets you control the torrentio scraping parameters. Visit "https://torrentio.strem.fun/configure" and configure your settings. Dont choose a debrid service. The "manifest url" will be copied to your clipboard.', hidden=True),
+        setting('Zilean Base URL', 'Please specify your Zilean base URL: ', scraper.services.zilean, 'base_url', hidden=True),
     ]
 ],
 ['Debrid Services', [
@@ -412,6 +413,7 @@ def get(self):
         setting('Show Menu on Startup', 'Please enter "true" or "false": ', ui_settings, 'run_directly'),
         setting('Debug printing', 'Please enter "true" or "false": ', ui_settings, 'debug'),
         setting('Log to file', 'Please enter "true" or "false": ', ui_settings, 'log'),
+        setting('Watchlist loop interval (sec)', 'Please enter an integer value in seconds: ', ui_settings, 'loop_interval_seconds'),
         setting('version', 'No snooping around! :D This is for compatibility reasons.', ui_settings, 'version',
                 hidden=True),
     ]
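Judging from the constructor arguments, each setting(...) entry binds an input prompt to an attribute on a target module: 'Zilean Base URL' writes scraper.services.zilean.base_url, and the new loop interval entry writes ui_settings.loop_interval_seconds. A hypothetical reduction of that binding pattern (the real setting class takes more parameters, e.g. entry, help and hidden):

```python
# Hypothetical sketch, not the actual settings.setting class.
class setting:
    def __init__(self, name, prompt, module, attr, hidden=False):
        self.name = name
        self.prompt = prompt
        self.module = module
        self.attr = attr
        self.hidden = hidden

    def input(self):
        # Prompt the user and store the value on the target module, so other
        # code can read it as a plain module attribute (e.g. zilean.base_url).
        setattr(self.module, self.attr, input(self.prompt))
```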
2 changes: 1 addition & 1 deletion ui/__init__.py
@@ -403,7 +403,7 @@ def threaded(stop):
         else:
             print("Type 'exit' to return to the main menu.")
         timeout = 5
-        regular_check = 1800
+        regular_check = int(ui_settings.loop_interval_seconds)
         timeout_counter = 0
         library = content.classes.library()[0]()
         # get entire plex_watchlist
1 change: 1 addition & 0 deletions ui/ui_settings.py
@@ -2,3 +2,4 @@
 run_directly = "true"
 debug = "false"
 log = "false"
+loop_interval_seconds = 1800
