"""
 Swisscows (Web, Images)

 @website     https://swisscows.ch
 @provide-api no

 @using-api   no
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content
"""

from json import loads
import re
from lxml.html import fromstring

from searx.url_utils import unquote, urlencode
from searx.utils import match_language


# engine dependent config
categories = ['general', 'images']
paging = True
language_support = True

# search-url
base_url = 'https://swisscows.ch/'
search_string = '?{query}&page={page}'

# the language list is scraped from the portal's region popup
supported_languages_url = base_url

# regex -- raw byte literals (br'...') so that '\s' is handed to the regex
# engine instead of being a (deprecated) invalid string escape sequence.
# The result data is embedded in the page as 'initialData: {...}, environment'.
regex_json = re.compile(br'initialData: {"Request":(.|\n)*},\s*environment')
regex_json_remove_start = re.compile(br'^initialData:\s*')
regex_json_remove_end = re.compile(br',\s*environment$')
# images are proxied through i.swisscows.ch; this prefix is stripped to
# recover the original (percent-encoded) image URL
regex_img_url_remove_start = re.compile(br'^https?://i\.swisscows\.ch/\?link=')


# do search-request
def request(query, params):
    """Build the Swisscows request URL.

    Maps the searx language to a Swisscows region/UI language, formats the
    query path (prefixed with 'image' for image searches) and stores the
    final URL in params['url'].
    """
    region = match_language(params['language'], supported_languages, language_aliases)
    ui_language = region.split('-')[0]

    query_args = urlencode({'query': query, 'uiLanguage': ui_language, 'region': region})
    search_path = search_string.format(query=query_args, page=params['pageno'])

    # image search uses a dedicated endpoint: 'image?{query}&page={page}'
    if params['category'] == 'images':
        search_path = 'image' + search_path

    params['url'] = base_url + search_path

    return params


# get response from search-request
def _decode_img_url(encoded_url):
    """Strip the i.swisscows.ch proxy prefix and percent-decode to recover
    the original image URL."""
    return unquote(regex_img_url_remove_start.sub(b'', encoded_url.encode('utf-8')).decode('utf-8'))


def response(resp):
    """Parse Swisscows' search portal HTML.

    The result data is embedded in the page as a JavaScript literal
    ('initialData: {...}'); it is extracted with regexes, decoded as JSON
    and turned into searx result dicts (web results and image results).
    Returns an empty list when no embedded data is found.
    """
    results = []

    json_regex = regex_json.search(resp.text)

    # check if results are returned
    if not json_regex:
        return []

    json_raw = regex_json_remove_end.sub(b'', regex_json_remove_start.sub(b'', json_regex.group()))
    json = loads(json_raw.decode('utf-8'))

    # parse results
    for result in json['Results'].get('items', []):
        # \uE000/\uE001 are Swisscows' private-use highlight markers
        result_title = result['Title'].replace(u'\uE000', '').replace(u'\uE001', '')

        # parse image results
        if result.get('ContentType', '').startswith('image'):
            # append result (cleaned title, consistent with web results)
            results.append({'url': result['SourceUrl'],
                            'title': result_title,
                            'content': '',
                            'img_src': _decode_img_url(result['Url']),
                            'template': 'images.html'})

        # parse general results
        else:
            result_url = result['Url'].replace(u'\uE000', '').replace(u'\uE001', '')
            result_content = result['Description'].replace(u'\uE000', '').replace(u'\uE001', '')

            # append result
            results.append({'url': result_url,
                            'title': result_title,
                            'content': result_content})

    # parse the separate image list
    for result in json.get('Images', []):
        # append result
        results.append({'url': result['SourceUrl'],
                        'title': result['Title'],
                        'content': '',
                        'img_src': _decode_img_url(result['Url']),
                        'template': 'images.html'})

    # return results
    return results
108
109
110


# get supported languages from their site
111
def _fetch_supported_languages(resp):
    """Scrape the list of supported language/region codes from the
    Swisscows region popup on the portal page."""
    dom = fromstring(resp.text)

    codes = []
    for link in dom.xpath('//div[@id="regions-popup"]//ul/li/a'):
        code = link.xpath('./@data-search-language')[0]
        # Swisscows uses 'nb' (Bokmål) where searx expects 'no' (Norwegian)
        if code.startswith('nb-'):
            code = 'no' + code[2:]
        codes.append(code)

    return codes