Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 629a05e1 authored by Thomas Pointhuber's avatar Thomas Pointhuber
Browse files

fix youtube engine and add comments

* add language-support
* decrease search-results/site to 5
* add comments
parent bb628469
Loading
Loading
Loading
Loading
+36 −7
Original line number | Diff line number | Diff line
## Youtube (Videos)
# 
# @website     https://www.youtube.com/
# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
# 
# @using-api   yes
# @results     JSON
# @stable      yes
# @parse       url, title, content, publishedDate, thumbnail

from json import loads
from urllib import urlencode
from dateutil import parser

# engine dependent config
categories = ['videos']
paging = True
language_support = True

# search-url
# NOTE: 5 results per request keeps the per-site share of a mixed
# results page small (see commit message).
base_url = 'https://gdata.youtube.com/feeds/api/videos'
search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5'  # noqa


# do search-request
# do search-request
def request(query, params):
    """Fill in ``params['url']`` for a youtube search and return *params*.

    ``params`` must carry ``pageno`` (1-based page number) and
    ``language`` (locale string, or ``'all'`` for no restriction).
    """
    # 5 results per page -> first result index of page N is (N-1)*5 + 1
    # (the gdata API's start-index parameter is 1-based)
    index = (params['pageno'] - 1) * 5 + 1

    params['url'] = search_url.format(query=urlencode({'q': query}),
                                      index=index)

    # add language tag if specified ('lr' takes the bare language code,
    # so strip any country suffix like '_US')
    if params['language'] != 'all':
        params['url'] += '&lr=' + params['language'].split('_')[0]

    return params


# get response from search-request
# get response from search-request
def response(resp):
    """Parse the youtube gdata JSON response into a list of result dicts.

    Each result carries: url, title, content, publishedDate, thumbnail.
    Returns an empty list when the response contains no ``feed`` key.
    """
    results = []

    search_results = loads(resp.text)

    # return empty array if there are no results
    if 'feed' not in search_results:
        return []

    feed = search_results['feed']

    # parse results
    for result in feed['entry']:
        url = [x['href'] for x in result['link'] if x['type'] == 'text/html']

        # skip entries without an html link (was `return`, which silently
        # aborted the whole result list on the first link-less entry)
        if not url:
            continue

        # remove tracking parameter
        url = url[0].replace('feature=youtube_gdata', '')
        if url.endswith('&'):
            url = url[:-1]

        title = result['title']['$t']
        content = ''
        thumbnail = ''

        # e.g. "2013-12-31T15:22:51.000Z"
        pubdate = result['published']['$t']
        publishedDate = parser.parse(pubdate)

        # NOTE(review): the pasted diff omitted these context lines;
        # reconstructed from upstream searx — confirm against the original.
        if result['media$group']['media$thumbnail']:
            thumbnail = result['media$group']['media$thumbnail'][0]['url']
            content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa

        if content:
            content += '<br />' + result['content']['$t']
        else:
            content = result['content']['$t']

        # append result
        results.append({'url': url,
                        'title': title,
                        'content': content,
                        'publishedDate': publishedDate,
                        'thumbnail': thumbnail})

    # return results
    return results
+0 −3
Original line number Diff line number Diff line
@@ -131,13 +131,10 @@ engines:

  - name : youtube
    engine : youtube
    categories : videos
    shortcut : yt

  - name : dailymotion
    engine : dailymotion
    locale : en_US
    categories : videos
    shortcut : dm

  - name : vimeo