Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Unverified Commit 1ea56576 authored by Noémi Ványi's avatar Noémi Ványi Committed by GitHub
Browse files

Merge branch 'master' into devel_google_videos

parents 0e493db2 899ba5d6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@ RUN echo "@commuedge http://nl.alpinelinux.org/alpine/edge/community" >> /etc/ap
    openssl-dev \
    ca-certificates \
    tini@commuedge \
 && pip install --upgrade pip \
 && pip install --no-cache -r requirements.txt \
 && apk del \
    build-base \
+27225 −1

File changed.

Preview size limit exceeded, changes collapsed.

+1 −3
Original line number Diff line number Diff line
@@ -88,9 +88,7 @@ def response(resp):

        url = json_data.get('purl')
        img_src = json_data.get('murl')

        thumb_json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('mad')))
        thumbnail = thumb_json_data.get('turl')
        thumbnail = json_data.get('turl')

        # append result
        results.append({'template': 'images.html',

searx/engines/findx.py

deleted100644 → 0
+0 −115
Original line number Diff line number Diff line
"""
FindX (General, Images, Videos)

@website     https://www.findx.com
@provide-api no
@using-api   no
@results     HTML
@stable      no
@parse       url, title, content, embedded, img_src, thumbnail_src
"""

from dateutil import parser
from json import loads
import re

from lxml import html

from searx import logger
from searx.engines.xpath import extract_text
from searx.engines.youtube_noapi import base_youtube_url, embedded_url
from searx.url_utils import urlencode


# findx supports paged result requests
paging = True
# the findx SERP embeds its whole result state as JSON in this <script> tag
results_xpath = '//script[@id="initial-state"]'
# {category} is one of the type_map values, {q} is the urlencoded query string
search_url = 'https://www.findx.com/{category}?{q}'
# map searx category names to findx URL path segments
type_map = {
    'none': 'web',
    'general': 'web',
    'images': 'images',
    'videos': 'videos',
}


def request(query, params):
    """Build the findx request URL for *query* and store it in *params*.

    The searx category is translated to findx's URL path segment via
    ``type_map``; query text and page number go into the query string.
    """
    query_args = urlencode({
        'q': query,
        'page': params['pageno'],
    })
    category = type_map[params['category']]
    params['url'] = search_url.format(category=category, q=query_args)
    return params


def response(resp):
    """Parse the JSON state embedded in a findx page and dispatch it.

    Exactly one of the web/images/video sections is expected to be
    populated; the first non-empty one wins. Returns a list of searx
    result dicts (empty when nothing matched).
    """
    dom = html.fromstring(resp.text)
    state_nodes = dom.xpath(results_xpath)
    state = loads(extract_text(state_nodes))

    web_section = state['web']['results']
    if len(web_section) > 0:
        # note: the web section nests one level deeper than images/video
        return _general_results(web_section['webSearch']['results'])

    image_section = state['images']['results']
    if len(image_section) > 0:
        return _images_results(image_section)

    video_section = state['video']['results']
    if len(video_section) > 0:
        return _videos_results(video_section)

    return []


def _general_results(general_results):
    results = []
    for result in general_results:
        results.append({
            'url': result['url'],
            'title': result['title'],
            'content': result['sum'],
        })
    return results


def _images_results(image_results):
    """Map raw findx image entries to searx image-template result dicts."""
    mapped = []
    for entry in image_results:
        assets = entry['assets']
        mapped.append({
            'template': 'images.html',
            'url': entry['sourceURL'],
            'title': entry['title'],
            'content': entry['source'],
            # asset URLs are wrapped in findx's proxy format; unwrap them
            'thumbnail_src': _extract_url(assets['thumb']['url']),
            'img_src': _extract_url(assets['file']['url']),
        })
    return mapped


def _videos_results(video_results):
    """Map raw findx video entries to searx video-template result dicts.

    Only YouTube-backed entries are supported; anything else is logged and
    skipped. Descriptions longer than 300 characters are truncated with an
    ellipsis.
    """
    results = []
    for result in video_results:
        # findx only proxies YouTube content; skip unknown providers.
        if not result['kind'].startswith('youtube'):
            # logger.warn() is a deprecated alias in the logging module;
            # warning() is the supported spelling.
            logger.warning('Unknown video kind in findx: {}'.format(result['kind']))
            continue

        description = result['snippet']['description']
        if len(description) > 300:
            description = description[:300] + '...'

        results.append({
            'url': base_youtube_url + result['id'],
            'title': result['snippet']['title'],
            'content': description,
            'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
            'publishedDate': parser.parse(result['snippet']['publishedAt']),
            'embedded': embedded_url.format(videoid=result['id']),
            'template': 'videos.html',
        })
    return results


def _extract_url(url):
    matching = re.search('(/https?://[^)]+)', url)
    if matching:
        return matching.group(0)[1:]
    return ''
+4 −7
Original line number Diff line number Diff line
@@ -32,8 +32,9 @@ search_url = base_url + 'do/search'
# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="result"]'
results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]'
link_xpath = './/h3/a'
content_xpath = './p[@class="search-item__body"]'


# do search-request
@@ -73,14 +74,10 @@ def response(resp):
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        # block ixquick search url's
        if re.match(r"^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if result.xpath('./p[@class="desc clk"]'):
            content = extract_text(result.xpath('./p[@class="desc clk"]'))
        if result.xpath(content_xpath):
            content = extract_text(result.xpath(content_xpath))
        else:
            content = ''

Loading