Commit ffe0972f authored by Dalf

Remove some engines: subtitleseeker, seedpeer, swisscows

http://www.subtitleseeker.com and http://www.seedpeer.eu no longer exist.
https://swisscows.ch/ has changed: the engine would need to be updated.
parent 6c95ebcf

searx/engines/seedpeer.py

deleted 100644 → 0
+0 −75
#  Seedpeer (Videos, Music, Files)
#
# @website     http://seedpeer.eu
# @provide-api no (nothing found)
#
# @using-api   no
# @results     HTML (using search portal)
# @stable      yes (HTML can change)
# @parse       url, title, content, seed, leech, magnetlink

from lxml import html
from operator import itemgetter
from searx.url_utils import quote, urljoin


url = 'http://www.seedpeer.eu/'
search_url = url + 'search/{search_term}/7/{page_no}.html'
# specific xpath variables
torrent_xpath = '//*[@id="body"]/center/center/table[2]/tr/td/a'
alternative_torrent_xpath = '//*[@id="body"]/center/center/table[1]/tr/td/a'
title_xpath = '//*[@id="body"]/center/center/table[2]/tr/td/a/text()'
alternative_title_xpath = '//*[@id="body"]/center/center/table/tr/td/a'
seeds_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[4]/font/text()'
alternative_seeds_xpath = '//*[@id="body"]/center/center/table/tr/td[4]/font/text()'
peers_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[5]/font/text()'
alternative_peers_xpath = '//*[@id="body"]/center/center/table/tr/td[5]/font/text()'
age_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[2]/text()'
alternative_age_xpath = '//*[@id="body"]/center/center/table/tr/td[2]/text()'
size_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[3]/text()'
alternative_size_xpath = '//*[@id="body"]/center/center/table/tr/td[3]/text()'


# do search-request
def request(query, params):
    params['url'] = search_url.format(search_term=quote(query),
                                      page_no=params['pageno'] - 1)
    return params


# get response from search-request
def response(resp):
    results = []
    dom = html.fromstring(resp.text)
    torrent_links = dom.xpath(torrent_xpath)
    if len(torrent_links) > 0:
        seeds = dom.xpath(seeds_xpath)
        peers = dom.xpath(peers_xpath)
        titles = dom.xpath(title_xpath)
        sizes = dom.xpath(size_xpath)
        ages = dom.xpath(age_xpath)
    else:  # pages with fewer than ~5 results use a different table layout
        torrent_links = dom.xpath(alternative_torrent_xpath)
        seeds = dom.xpath(alternative_seeds_xpath)
        peers = dom.xpath(alternative_peers_xpath)
        titles = dom.xpath(alternative_title_xpath)
        sizes = dom.xpath(alternative_size_xpath)
        ages = dom.xpath(alternative_age_xpath)
    # return empty array if nothing is found
    if not torrent_links:
        return []

    # parse results
    for index, result in enumerate(torrent_links):
        link = result.attrib.get('href')
        href = urljoin(url, link)
        results.append({'url': href,
                        'title': titles[index].text_content(),
                        'content': '{}, {}'.format(sizes[index], ages[index]),
                        'seed': seeds[index],
                        'leech': peers[index],
                        'template': 'torrent.html'})

    # return results sorted by seeder
    return sorted(results, key=itemgetter('seed'), reverse=True)
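
For context, the deleted module followed searx's standard request/response engine contract: request() only has to fill in params['url']. Below is a minimal standalone sketch of that URL construction, using the standard library's quote in place of searx.url_utils and an illustrative query; the unit test removed further down exercises the real request() the same way.

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote        # Python 2

# constants mirrored from the deleted engine
url = 'http://www.seedpeer.eu/'
search_url = url + 'search/{search_term}/7/{page_no}.html'


def build_url(query, pageno):
    # seedpeer counted result pages from zero, hence the "- 1"
    return search_url.format(search_term=quote(query), page_no=pageno - 1)


print(build_url('test query', 1))
# -> http://www.seedpeer.eu/search/test%20query/7/0.html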

searx/engines/subtitleseeker.py

deleted 100644 → 0
+0 −86
"""
 Subtitleseeker (Video)

 @website     http://www.subtitleseeker.com
 @provide-api no

 @using-api   no
 @results     HTML
 @stable      no (HTML can change)
 @parse       url, title, content
"""

from lxml import html
from searx.languages import language_codes
from searx.engines.xpath import extract_text
from searx.url_utils import quote_plus

# engine dependent config
categories = ['videos']
paging = True
language = ""

# search-url
url = 'http://www.subtitleseeker.com/'
search_url = url + 'search/TITLES/{query}?p={pageno}'

# specific xpath variables
results_xpath = '//div[@class="boxRows"]'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=quote_plus(query),
                                      pageno=params['pageno'])
    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    search_lang = ""

    # dirty fix for languages named differently on their site
    if resp.search_params['language'][:2] == 'fa':
        search_lang = 'Farsi'
    elif resp.search_params['language'] == 'pt-BR':
        search_lang = 'Brazilian'
    elif resp.search_params['language'] != 'all':
        search_lang = [lc[3]
                       for lc in language_codes
                       if lc[0].split('-')[0] == resp.search_params['language'].split('-')[0]]
        search_lang = search_lang[0].split(' (')[0]

    # parse results
    for result in dom.xpath(results_xpath):
        link = result.xpath(".//a")[0]
        href = link.attrib.get('href')

        if language is not "":
            href = href + language + '/'
        elif search_lang:
            href = href + search_lang + '/'

        title = extract_text(link)

        content = extract_text(result.xpath('.//div[contains(@class,"red")]'))
        content = content + " - "
        text = extract_text(result.xpath('.//div[contains(@class,"grey-web")]')[0])
        content = content + text

        if result.xpath(".//span") != []:
            content = content +\
                " - (" +\
                extract_text(result.xpath(".//span")) +\
                ")"

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content})

    # return results
    return results
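
The least obvious part of the engine above is the language handling: searx locale codes such as 'fr-FR' had to be mapped to the plain English names subtitleseeker used in its URLs, with a couple of hard-coded exceptions. Below is a standalone sketch of that mapping; the two-entry language_codes tuple is an illustrative stand-in for searx.languages.language_codes, whose entries carry the locale code in the first field and the English name in the fourth, as the deleted response() assumes.

# illustrative subset; the real searx.languages.language_codes list is much
# longer (native names replaced by English ones here for brevity)
language_codes = (
    ('fr-FR', 'French', 'France', 'French'),
    ('pt-PT', 'Portuguese', 'Portugal', 'Portuguese'),
)


def site_language(searx_language):
    # same special cases the deleted response() applied
    if searx_language[:2] == 'fa':
        return 'Farsi'
    if searx_language == 'pt-BR':
        return 'Brazilian'
    if searx_language == 'all':
        return ''
    names = [lc[3] for lc in language_codes
             if lc[0].split('-')[0] == searx_language.split('-')[0]]
    # drop a trailing " (Country)" qualifier if present
    return names[0].split(' (')[0] if names else ''


print(site_language('fr-FR'))  # -> French
print(site_language('pt-BR'))  # -> Brazilian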

searx/engines/swisscows.py

deleted 100644 → 0
+0 −125
"""
 Swisscows (Web, Images)

 @website     https://swisscows.ch
 @provide-api no

 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
"""

from json import loads
import re
from lxml.html import fromstring
from searx.url_utils import unquote, urlencode
from searx.utils import match_language

# engine dependent config
categories = ['general', 'images']
paging = True
language_support = True

# search-url
base_url = 'https://swisscows.ch/'
search_string = '?{query}&page={page}'

supported_languages_url = base_url

# regex
regex_json = re.compile(b'initialData: {"Request":(.|\n)*},\s*environment')
regex_json_remove_start = re.compile(b'^initialData:\s*')
regex_json_remove_end = re.compile(b',\s*environment$')
regex_img_url_remove_start = re.compile(b'^https?://i\.swisscows\.ch/\?link=')


# do search-request
def request(query, params):
    if params['language'] == 'all':
        ui_language = 'browser'
        region = 'browser'
    else:
        region = match_language(params['language'], supported_languages, language_aliases)
        ui_language = region.split('-')[0]

    search_path = search_string.format(
        query=urlencode({'query': query, 'uiLanguage': ui_language, 'region': region}),
        page=params['pageno']
    )

    # image search query is something like 'image?{query}&page={page}'
    if params['category'] == 'images':
        search_path = 'image' + search_path

    params['url'] = base_url + search_path

    return params


# get response from search-request
def response(resp):
    results = []

    json_regex = regex_json.search(resp.text)

    # check if results are returned
    if not json_regex:
        return []

    json_raw = regex_json_remove_end.sub(b'', regex_json_remove_start.sub(b'', json_regex.group()))
    json = loads(json_raw.decode('utf-8'))

    # parse results
    for result in json['Results'].get('items', []):
        result_title = result['Title'].replace(u'\uE000', '').replace(u'\uE001', '')

        # parse image results
        if result.get('ContentType', '').startswith('image'):
            img_url = unquote(regex_img_url_remove_start.sub(b'', result['Url'].encode('utf-8')).decode('utf-8'))

            # append result
            results.append({'url': result['SourceUrl'],
                            'title': result['Title'],
                            'content': '',
                            'img_src': img_url,
                            'template': 'images.html'})

        # parse general results
        else:
            result_url = result['Url'].replace(u'\uE000', '').replace(u'\uE001', '')
            result_content = result['Description'].replace(u'\uE000', '').replace(u'\uE001', '')

            # append result
            results.append({'url': result_url,
                            'title': result_title,
                            'content': result_content})

    # parse images
    for result in json.get('Images', []):
        # decode image url
        img_url = unquote(regex_img_url_remove_start.sub(b'', result['Url'].encode('utf-8')).decode('utf-8'))

        # append result
        results.append({'url': result['SourceUrl'],
                        'title': result['Title'],
                        'content': '',
                        'img_src': img_url,
                        'template': 'images.html'})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    supported_languages = []
    dom = fromstring(resp.text)
    options = dom.xpath('//div[@id="regions-popup"]//ul/li/a')
    for option in options:
        code = option.xpath('./@data-search-language')[0]
        if code.startswith('nb-'):
            code = code.replace('nb', 'no', 1)
        supported_languages.append(code)

    return supported_languages
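
Since swisscows exposed no API, the engine above scraped a JSON blob embedded in the page's inline JavaScript. Below is a minimal sketch of that extraction step in isolation, using the same regular expressions (written as raw byte strings here) against a fabricated stand-in for the markup the site used to serve; real pages were of course much larger.

import re
from json import loads

# the deleted engine's patterns
regex_json = re.compile(br'initialData: {"Request":(.|\n)*},\s*environment')
regex_json_remove_start = re.compile(br'^initialData:\s*')
regex_json_remove_end = re.compile(br',\s*environment$')

# illustrative stand-in for the HTML body the engine used to receive
page = (b'<script>var data = {initialData: {"Request":{"Query":"searx"},'
        b'"Results":{"items":[]}}, environment: {"locale":"en"}};</script>')

match = regex_json.search(page)
if match:
    # strip the surrounding JavaScript so only the JSON object remains
    json_raw = regex_json_remove_end.sub(b'', regex_json_remove_start.sub(b'', match.group()))
    data = loads(json_raw.decode('utf-8'))
    print(data['Results'].get('items', []))  # -> []

If the page no longer contains that initialData blob, regex_json.search() returns None and the engine simply returns an empty result list, which matches the breakage the commit message describes.
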
searx/settings.yml

+0 −18
@@ -568,24 +568,12 @@ engines:
     engine : spotify
     shortcut : stf

-  - name : subtitleseeker
-    engine : subtitleseeker
-    shortcut : ss
-# The language is an option. You can put any language written in english
-# Examples : English, French, German, Hungarian, Chinese...
-#    language : English
-
   - name : startpage
     engine : startpage
     shortcut : sp
     timeout : 6.0
     disabled : True

-  - name : swisscows
-    engine : swisscows
-    shortcut : sw
-    disabled : True
-
   - name : tokyotoshokan
     engine : tokyotoshokan
     shortcut : tt
@@ -664,12 +652,6 @@ engines:
     timeout: 6.0
     categories : science

-  - name : seedpeer
-    engine : seedpeer
-    shortcut: speu
-    categories: files, music, videos
-    disabled: True
-
   - name : dictzone
     engine : dictzone
     shortcut : dc
tests/unit/engines/test_seedpeer.py

+0 −51
import mock
from collections import defaultdict
from searx.engines import seedpeer
from searx.testing import SearxTestCase
from datetime import datetime


class TestSeedPeerEngine(SearxTestCase):

    html = ''
    with open('./tests/unit/engines/seedpeer_fixture.html') as fixture:
        html += fixture.read()

    def test_request(self):
        query = 'test_query'
        dicto = defaultdict(dict)
        dicto['pageno'] = 1
        params = seedpeer.request(query, dicto)
        self.assertIn('url', params)
        self.assertIn(query, params['url'])
        self.assertIn('seedpeer.eu', params['url'])

    def test_response_raises_attr_error_on_empty_response(self):
        self.assertRaises(AttributeError, seedpeer.response, None)
        self.assertRaises(AttributeError, seedpeer.response, [])
        self.assertRaises(AttributeError, seedpeer.response, '')
        self.assertRaises(AttributeError, seedpeer.response, '[]')

    def test_response_returns_empty_list(self):
        response = mock.Mock(text='<html></html>')
        self.assertEqual(seedpeer.response(response), [])

    def test_response_returns_all_results(self):
        response = mock.Mock(text=self.html)
        results = seedpeer.response(response)
        self.assertTrue(isinstance(results, list))
        self.assertEqual(len(results), 2)

    def test_response_returns_correct_results(self):
        response = mock.Mock(text=self.html)
        results = seedpeer.response(response)
        self.assertEqual(
            results[0]['title'], 'Narcos - Season 2 - 720p WEBRiP - x265 HEVC - ShAaNiG '
        )
        self.assertEqual(
            results[0]['url'],
            'http://www.seedpeer.eu/details/11685972/Narcos---Season-2---720p-WEBRiP---x265-HEVC---ShAaNiG.html'
        )
        self.assertEqual(results[0]['content'], '2.48 GB, 1 day')
        self.assertEqual(results[0]['seed'], '861')
        self.assertEqual(results[0]['leech'], '332')