Commit 310d7b70 authored by Nicolas Gelot's avatar Nicolas Gelot

Merge remote-tracking branch 'asciimoo/master' into dev

parents 2cc736bd 629b36d4
flask==1.0.2 flask==1.0.2
flask-babel==0.11.2 jinja2==2.10
flask-babel==0.12.2
lxml==4.3.3 lxml==4.3.3
pygments==2.3.1 pygments==2.3.1
python-dateutil==2.8.0 python-dateutil==2.8.0
......
"""
APK Mirror
@website https://www.apkmirror.com
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, thumbnail_src
"""
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
# engine dependent config
# APK Mirror results are filed under searx's "it" category
categories = ['it']
# the site exposes paged results, so searx pagination is enabled
paging = True
# I am not 100% certain about this, as apkmirror appears to be a wordpress site,
# which might support time_range searching. If you want to implement it, go ahead.
time_range_support = False
# search-url
base_url = 'https://www.apkmirror.com'
search_url = base_url + '/?post_type=app_release&searchtype=apk&page={pageno}&{query}'


# do search-request
def request(query, params):
    """Fill in params['url'] with the APK Mirror search URL for *query*.

    The engine framework supplies *params* (with 'pageno' set) and expects
    the same dict back with 'url' populated.
    """
    encoded_query = urlencode({'s': query})
    params['url'] = search_url.format(pageno=params['pageno'], query=encoded_query)
    return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
# parse results
for result in dom.xpath('.//div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
link = result.xpath('.//h5/a')[0]
url = base_url + link.attrib.get('href') + '#downloads'
title = extract_text(link)
thumbnail_src = base_url + result.xpath('.//img')[0].attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
res = {
'url': url,
'title': title,
'thumbnail_src': thumbnail_src
}
# append result
results.append(res)
# return results
return results
...@@ -35,9 +35,12 @@ site_url = 'https://duckduckgo.com/?{query}&iar=images&iax=1&ia=images' ...@@ -35,9 +35,12 @@ site_url = 'https://duckduckgo.com/?{query}&iar=images&iax=1&ia=images'
# run query in site to get vqd number needed for requesting images # run query in site to get vqd number needed for requesting images
# TODO: find a way to get this number without an extra request (is it a hash of the query?) # TODO: find a way to get this number without an extra request (is it a hash of the query?)
def get_vqd(query): def get_vqd(query, headers):
res = get(site_url.format(query=urlencode({'q': query}))) query_url = site_url.format(query=urlencode({'q': query}))
res = get(query_url, headers=headers)
content = res.text content = res.text
if content.find('vqd=\'') == -1:
raise Exception('Request failed')
vqd = content[content.find('vqd=\'') + 5:] vqd = content[content.find('vqd=\'') + 5:]
vqd = vqd[:vqd.find('\'')] vqd = vqd[:vqd.find('\'')]
return vqd return vqd
...@@ -47,7 +50,7 @@ def get_vqd(query): ...@@ -47,7 +50,7 @@ def get_vqd(query):
def request(query, params): def request(query, params):
# to avoid running actual external requests when testing # to avoid running actual external requests when testing
if 'is_test' not in params: if 'is_test' not in params:
vqd = get_vqd(query) vqd = get_vqd(query, params['headers'])
else: else:
vqd = '12345' vqd = '12345'
...@@ -74,7 +77,7 @@ def response(resp): ...@@ -74,7 +77,7 @@ def response(resp):
try: try:
res_json = loads(content) res_json = loads(content)
except: except:
return [] raise Exception('Cannot parse results')
# parse results # parse results
for result in res_json['results']: for result in res_json['results']:
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
""" """
from datetime import date, timedelta from datetime import date, timedelta
from json import loads
from lxml import html from lxml import html
from searx.url_utils import urlencode, urlparse, parse_qs from searx.url_utils import urlencode, urlparse, parse_qs
...@@ -39,7 +38,6 @@ time_range_dict = {'day': 'd', ...@@ -39,7 +38,6 @@ time_range_dict = {'day': 'd',
# do search-request # do search-request
def request(query, params): def request(query, params):
search_options = { search_options = {
'ijn': params['pageno'] - 1,
'start': (params['pageno'] - 1) * number_of_results 'start': (params['pageno'] - 1) * number_of_results
} }
...@@ -53,7 +51,7 @@ def request(query, params): ...@@ -53,7 +51,7 @@ def request(query, params):
search_options['tbs'] = time_range_custom_attr.format(start=start, end=end) search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
if safesearch and params['safesearch']: if safesearch and params['safesearch']:
search_options['safe'] = 'on' search_options['safe'] = 'active'
params['url'] = search_url.format(query=urlencode({'q': query}), params['url'] = search_url.format(query=urlencode({'q': query}),
search_options=urlencode(search_options)) search_options=urlencode(search_options))
...@@ -63,24 +61,30 @@ def request(query, params): ...@@ -63,24 +61,30 @@ def request(query, params):
# get response from search-request # get response from search-request
def response(resp): def response(resp):
dom = html.fromstring(resp.text)
results = [] results = []
for element in dom.xpath('//div[@id="search"] //td'):
link = element.xpath('./a')[0]
dom = html.fromstring(resp.text) google_url = urlparse(link.xpath('.//@href')[0])
query = parse_qs(google_url.query)
source_url = next(iter(query.get('q', [])), None)
# parse results title_parts = element.xpath('./cite//following-sibling::*/text()')
for img in dom.xpath('//a'): title_parts.extend(element.xpath('./cite//following-sibling::text()')[:-1])
r = {
'title': ' '.join(img.xpath('.//div[class="rg_ilmbg"]//text()')), result = {
'title': ''.join(title_parts),
'content': '', 'content': '',
'template': 'images.html', 'template': 'images.html',
'url': source_url,
'img_src': source_url,
'thumbnail_src': next(iter(link.xpath('.//img //@src')), None)
} }
url = urlparse(img.xpath('.//@href')[0])
query = parse_qs(url.query) if not source_url or not result['thumbnail_src']:
r['url'] = query['imgrefurl'][0] continue
r['img_src'] = query['imgurl'][0]
r['thumbnail_src'] = r['img_src'] results.append(result)
# append result
results.append(r)
# return results
return results return results
...@@ -8,7 +8,8 @@ ...@@ -8,7 +8,8 @@
# @stable no # @stable no
# @parse url, title, content, publishedDate, thumbnail, embedded # @parse url, title, content, publishedDate, thumbnail, embedded
from lxml import html from functools import reduce
from json import loads
from searx.engines.xpath import extract_text from searx.engines.xpath import extract_text
from searx.utils import list_get from searx.utils import list_get
from searx.url_utils import quote_plus from searx.url_utils import quote_plus
...@@ -34,20 +35,6 @@ embedded_url = '<iframe width="540" height="304" ' +\ ...@@ -34,20 +35,6 @@ embedded_url = '<iframe width="540" height="304" ' +\
base_youtube_url = 'https://www.youtube.com/watch?v=' base_youtube_url = 'https://www.youtube.com/watch?v='
# specific xpath variables
results_xpath = "//ol/li/div[contains(@class, 'yt-lockup yt-lockup-tile yt-lockup-video vve-check')]"
url_xpath = './/h3/a/@href'
title_xpath = './/div[@class="yt-lockup-content"]/h3/a'
content_xpath = './/div[@class="yt-lockup-content"]/div[@class="yt-lockup-description yt-ui-ellipsis yt-ui-ellipsis-2"]'
# returns extract_text on the first result selected by the xpath or None
def extract_text_from_dom(result, xpath):
r = result.xpath(xpath)
if len(r) > 0:
return extract_text(r[0])
return None
# do search-request # do search-request
def request(query, params): def request(query, params):
...@@ -63,27 +50,38 @@ def request(query, params): ...@@ -63,27 +50,38 @@ def request(query, params):
def response(resp): def response(resp):
results = [] results = []
dom = html.fromstring(resp.text) results_data = resp.text[resp.text.find('ytInitialData'):]
results_data = results_data[results_data.find('{'):results_data.find(';\n')]
# parse results
for result in dom.xpath(results_xpath): results_json = loads(results_data) if results_data else {}
videoid = list_get(result.xpath('@data-context-item-id'), 0) sections = results_json.get('contents', {})\
if videoid is not None: .get('twoColumnSearchResultsRenderer', {})\
url = base_youtube_url + videoid .get('primaryContents', {})\
thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg' .get('sectionListRenderer', {})\
.get('contents', [])
title = extract_text_from_dom(result, title_xpath) or videoid
content = extract_text_from_dom(result, content_xpath) for section in sections:
for video_container in section.get('itemSectionRenderer', {}).get('contents', []):
embedded = embedded_url.format(videoid=videoid) video = video_container.get('videoRenderer', {})
videoid = video.get('videoId')
# append result if videoid is not None:
results.append({'url': url, url = base_youtube_url + videoid
'title': title, thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg'
'content': content, title = video.get('title', {}).get('simpleText', videoid)
'template': 'videos.html', description_snippet = video.get('descriptionSnippet', {})
'embedded': embedded, if 'runs' in description_snippet:
'thumbnail': thumbnail}) content = reduce(lambda a, b: a + b.get('text', ''), description_snippet.get('runs'), '')
else:
content = description_snippet.get('simpleText', '')
embedded = embedded_url.format(videoid=videoid)
# append result
results.append({'url': url,
'title': title,
'content': content,
'template': 'videos.html',
'embedded': embedded,
'thumbnail': thumbnail})
# return results # return results
return results return results
...@@ -111,19 +111,16 @@ class RawTextQuery(object): ...@@ -111,19 +111,16 @@ class RawTextQuery(object):
parse_next = True parse_next = True
engine_name = engine_shortcuts[prefix] engine_name = engine_shortcuts[prefix]
if engine_name in engines: if engine_name in engines:
for engine_category in engines[engine_name].categories: self.engines.append({'category': 'none',
self.engines.append({'category': engine_category, 'name': engine_name,
'name': engine_name, 'from_bang': True})
'from_bang': True})
# check if prefix is equal with engine name # check if prefix is equal with engine name
elif prefix in engines: elif prefix in engines:
parse_next = True parse_next = True
if prefix in engines: self.engines.append({'category': 'none',
for engine_category in engines[prefix].categories: 'name': prefix,
self.engines.append({'category': engine_category, 'from_bang': True})
'name': prefix,
'from_bang': True})
# check if prefix is equal with category name # check if prefix is equal with category name
elif prefix in categories: elif prefix in categories:
......
...@@ -55,6 +55,12 @@ outgoing: # communication with search engines ...@@ -55,6 +55,12 @@ outgoing: # communication with search engines
# - 1.1.1.2 # - 1.1.1.2
engines: engines:
- name: apk mirror
engine: apkmirror
timeout: 4.0
shortcut: apkm
disabled: True
- name : arch linux wiki - name : arch linux wiki
engine : archlinux engine : archlinux
shortcut : al shortcut : al
...@@ -128,7 +134,7 @@ engines: ...@@ -128,7 +134,7 @@ engines:
- name : crossref - name : crossref
engine : json_engine engine : json_engine
paging : True paging : True
search_url : http://search.crossref.org/dois?q={query}&page={pageno} search_url : https://search.crossref.org/dois?q={query}&page={pageno}
url_query : doi url_query : doi
title_query : title title_query : title
content_query : fullCitation content_query : fullCitation
...@@ -306,7 +312,7 @@ engines: ...@@ -306,7 +312,7 @@ engines:
url_xpath : .//h3/a/@href url_xpath : .//h3/a/@href
title_xpath : .//h3/a title_xpath : .//h3/a
content_xpath : .//div[@class="gs_rs"] content_xpath : .//div[@class="gs_rs"]
suggestion_xpath : //div[@id="gs_qsuggest"]/ul/li suggestion_xpath : //div[@id="gs_res_ccl_top"]//a/b
page_size : 10 page_size : 10
first_page_num : 0 first_page_num : 0
categories : science categories : science
......
...@@ -41,7 +41,7 @@ class TestDuckduckgoImagesEngine(TestCase): ...@@ -41,7 +41,7 @@ class TestDuckduckgoImagesEngine(TestCase):
self.assertRaises(AttributeError, duckduckgo_images.response, '[]') self.assertRaises(AttributeError, duckduckgo_images.response, '[]')
response = mock.Mock(text='If this error persists, please let us know: ops@duckduckgo.com') response = mock.Mock(text='If this error persists, please let us know: ops@duckduckgo.com')
self.assertEqual(duckduckgo_images.response(response), []) self.assertRaises(Exception, duckduckgo_images.response, response)
json = """ json = """
{ {
......
...@@ -46,121 +46,71 @@ class TestYoutubeNoAPIEngine(TestCase): ...@@ -46,121 +46,71 @@ class TestYoutubeNoAPIEngine(TestCase):
self.assertEqual(youtube_noapi.response(response), []) self.assertEqual(youtube_noapi.response(response), [])
html = """ html = """
<ol id="item-section-063864" class="item-section"> <div></div>
<li> <script>
<div class="yt-lockup yt-lockup-tile yt-lockup-video vve-check clearfix yt-uix-tile" window["ytInitialData"] = {
data-context-item-id="DIVZCPfAOeM" "contents": {
data-visibility-tracking="CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JECx_-GK5uqMpcIB"> "twoColumnSearchResultsRenderer": {
<div class="yt-lockup-dismissable"><div class="yt-lockup-thumbnail contains-addto"> "primaryContents": {
<a aria-hidden="true" href="/watch?v=DIVZCPfAOeM" class=" yt-uix-sessionlink pf-link" "sectionListRenderer": {
data-sessionlink="itct=CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JFIEdGVzdA"> "contents": [
<div class="yt-thumb video-thumb"><img src="//i.ytimg.com/vi/DIVZCPfAOeM/mqdefault.jpg" {
width="196" height="110"/></div><span class="video-time" aria-hidden="true">11:35</span></a> "itemSectionRenderer": {
<span class="thumb-menu dark-overflow-action-menu video-actions"> "contents": [
</span> {
</div> "videoRenderer": {
<div class="yt-lockup-content"> "videoId": "DIVZCPfAOeM",
<h3 class="yt-lockup-title"> "title": {
<a href="/watch?v=DIVZCPfAOeM" "simpleText": "Title"
class="yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link" },
data-sessionlink="itct=CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JFIEdGVzdA" "descriptionSnippet": {
title="Top Speed Test Kawasaki Ninja H2 (Thailand) By. MEHAY SUPERBIKE" "runs": [
aria-describedby="description-id-259079" rel="spf-prefetch" dir="ltr"> {
Title "text": "Des"
</a> },
<span class="accessible-description" id="description-id-259079"> - Durée : 11:35.</span> {
</h3> "text": "cription"
<div class="yt-lockup-byline">de }
<a href="/user/mheejapan" class=" yt-uix-sessionlink spf-link g-hovercard" ]
data-sessionlink="itct=CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JA" data-ytid="UCzEesu54Hjs0uRKmpy66qeA" }
data-name="">MEHAY SUPERBIKE</a></div><div class="yt-lockup-meta"> }
<ul class="yt-lockup-meta-info"> },
<li>il y a 20 heures</li> {
<li>8 424 vues</li> "videoRenderer": {
</ul> "videoId": "9C_HReR_McQ",
</div> "title": {
<div class="yt-lockup-description yt-ui-ellipsis yt-ui-ellipsis-2" dir="ltr"> "simpleText": "Title"
Description },
</div> "descriptionSnippet": {
<div class="yt-lockup-badges"> "simpleText": "Description"
<ul class="yt-badge-list "> }
<li class="yt-badge-item" > }
<span class="yt-badge">Nouveauté</span> }
</li> ]
<li class="yt-badge-item" ><span class="yt-badge " >HD</span></li> }
</ul> }
</div> ]
<div class="yt-lockup-action-menu yt-uix-menu-container"> }
<div class="yt-uix-menu yt-uix-videoactionmenu hide-until-delayloaded" }
data-video-id="DIVZCPfAOeM" data-menu-content-id="yt-uix-videoactionmenu-menu"> }
</div> }
</div> };
</div> </script>
</div>
</div>
</li>
</ol>
""" """
response = mock.Mock(text=html) response = mock.Mock(text=html)
results = youtube_noapi.response(response) results = youtube_noapi.response(response)
self.assertEqual(type(results), list) self.assertEqual(type(results), list)
self.assertEqual(len(results), 1) self.assertEqual(len(results), 2)
self.assertEqual(results[0]['title'], 'Title') self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'https://www.youtube.com/watch?v=DIVZCPfAOeM') self.assertEqual(results[0]['url'], 'https://www.youtube.com/watch?v=DIVZCPfAOeM')
self.assertEqual(results[0]['content'], 'Description') self.assertEqual(results[0]['content'], 'Description')
self.assertEqual(results[0]['thumbnail'], 'https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg') self.assertEqual(results[0]['thumbnail'], 'https://i.ytimg.com/vi/DIVZCPfAOeM/hqdefault.jpg')
self.assertTrue('DIVZCPfAOeM' in results[0]['embedded']) self.assertTrue('DIVZCPfAOeM' in results[0]['embedded'])
self.assertEqual(results[1]['title'], 'Title')
html = """ self.assertEqual(results[1]['url'], 'https://www.youtube.com/watch?v=9C_HReR_McQ')
<ol id="item-section-063864" class="item-section"> self.assertEqual(results[1]['content'], 'Description')
<li> self.assertEqual(results[1]['thumbnail'], 'https://i.ytimg.com/vi/9C_HReR_McQ/hqdefault.jpg')
<div class="yt-lockup yt-lockup-tile yt-lockup-video vve-check clearfix yt-uix-tile" self.assertTrue('9C_HReR_McQ' in results[1]['embedded'])
data-context-item-id="DIVZCPfAOeM"
data-visibility-tracking="CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JECx_-GK5uqMpcIB">
<div class="yt-lockup-dismissable"><div class="yt-lockup-thumbnail contains-addto">
<a aria-hidden="true" href="/watch?v=DIVZCPfAOeM" class=" yt-uix-sessionlink pf-link"
data-sessionlink="itct=CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JFIEdGVzdA">
<div class="yt-thumb video-thumb"><img src="//i.ytimg.com/vi/DIVZCPfAOeM/mqdefault.jpg"
width="196" height="110"/></div><span class="video-time" aria-hidden="true">11:35</span></a>
<span class="thumb-menu dark-overflow-action-menu video-actions">
</span>
</div>
<div class="yt-lockup-content">
<h3 class="yt-lockup-title">
<span class="accessible-description" id="description-id-259079"> - Durée : 11:35.</span>
</h3>
<div class="yt-lockup-byline">de
<a href="/user/mheejapan" class=" yt-uix-sessionlink spf-link g-hovercard"
data-sessionlink="itct=CBgQ3DAYACITCPGXnYau6sUCFZEIHAod-VQASCj0JA" data-ytid="UCzEesu54Hjs0uRKmpy66qeA"
data-name="">MEHAY SUPERBIKE</a></div><div class="yt-lockup-meta">
<ul class="yt-lockup-meta-info">
<li>il y a 20 heures</li>
<li>8 424 vues</li>
</ul>
</div>
<div class="yt-lockup-badges">
<ul class="yt-badge-list ">
<li class="yt-badge-item" >
<span class="yt-badge">Nouveauté</span>
</li>
<li class="yt-badge-item" ><span class="yt-badge " >HD</span></li>
</ul>
</div>
<div class="yt-lockup-action-menu yt-uix-menu-container">
<div class="yt-uix-menu yt-uix-videoactionmenu hide-until-delayloaded"
data-video-id="DIVZCPfAOeM" data-menu-content-id="yt-uix-videoactionmenu-menu">
</div>
</div>
</div>
</div>
</div>
</li>
</ol>
"""
response = mock.Mock(text=html)
results = youtube_noapi.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
html = """ html = """
<ol id="item-section-063864" class="item-section"> <ol id="item-section-063864" class="item-section">
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment