Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed6a6363 authored by Johnny Kalajdzic's avatar Johnny Kalajdzic
Browse files

Add ten images in results in category "general"

parent a2d6f8e5
Loading
Loading
Loading
Loading
+11 −0
Original line number Original line Diff line number Diff line
@@ -36,6 +36,17 @@
            {% endfor %}
            {% endfor %}
            {% endif %}
            {% endif %}


            {% for result in results_image %}
            <div class="result {% if result['template'] %}result-{{ result.template|replace('.html', '') }}{% else %}result-default{% endif %}">
                {% set index = loop.index %}
                {% if result.template %}
                    {% include get_result_template('oscar', result['template']) %}
                {% else %}
                    {% include 'oscar/result_templates/default.html' %}
                {% endif %}
            </div>
            {% endfor %}

            {% for result in results %}
            {% for result in results %}
            <div class="result {% if result['template'] %}result-{{ result.template|replace('.html', '') }}{% else %}result-default{% endif %}">
            <div class="result {% if result['template'] %}result-{{ result.template|replace('.html', '') }}{% else %}result-default{% endif %}">
                {% set index = loop.index %}
                {% set index = loop.index %}
+71 −119
Original line number Original line Diff line number Diff line
@@ -16,10 +16,12 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.


(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''
'''
from searx.results import ResultContainer


if __name__ == '__main__':
if __name__ == '__main__':
    from sys import path
    from sys import path
    from os.path import realpath, dirname
    from os.path import realpath, dirname

    path.append(realpath(dirname(realpath(__file__)) + '/../'))
    path.append(realpath(dirname(realpath(__file__)) + '/../'))


import hashlib
import hashlib
@@ -31,6 +33,7 @@ import sys
import requests
import requests


from searx import logger
from searx import logger

logger = logger.getChild('webapp')
logger = logger.getChild('webapp')


try:
try:
@@ -40,6 +43,7 @@ try:
except:
except:
    logger.critical("cannot import dependency: pygments")
    logger.critical("cannot import dependency: pygments")
    from sys import exit
    from sys import exit

    exit(1)
    exit(1)
from cgi import escape
from cgi import escape
from datetime import datetime, timedelta
from datetime import datetime, timedelta
@@ -63,7 +67,7 @@ from searx.utils import (
from searx.version import VERSION_STRING
from searx.version import VERSION_STRING
from searx.languages import language_codes as languages
from searx.languages import language_codes as languages
from searx.search import SearchWithPlugins, get_search_query_from_webapp
from searx.search import SearchWithPlugins, get_search_query_from_webapp
from searx.query import RawTextQuery
from searx.query import RawTextQuery, SearchQuery
from searx.autocomplete import searx_bang, backends as autocomplete_backends
from searx.autocomplete import searx_bang, backends as autocomplete_backends
from searx.plugins import plugins
from searx.plugins import plugins
from searx.plugins.oa_doi_rewrite import get_doi_resolver
from searx.plugins.oa_doi_rewrite import get_doi_resolver
@@ -85,7 +89,6 @@ try:
except:
except:
    from io import StringIO
    from io import StringIO



if sys.version_info[0] == 3:
if sys.version_info[0] == 3:
    unicode = str
    unicode = str
    PY3 = True
    PY3 = True
@@ -94,6 +97,7 @@ else:


# serve pages with HTTP/1.1
# serve pages with HTTP/1.1
from werkzeug.serving import WSGIRequestHandler
from werkzeug.serving import WSGIRequestHandler

WSGIRequestHandler.protocol_version = "HTTP/{}".format(settings['server'].get('http_protocol_version', '1.0'))
WSGIRequestHandler.protocol_version = "HTTP/{}".format(settings['server'].get('http_protocol_version', '1.0'))


# about static
# about static
@@ -197,7 +201,6 @@ def code_highlighter(codelines, language=None):
        # new codeblock is detected
        # new codeblock is detected
        if last_line is not None and \
        if last_line is not None and \
                last_line + 1 != line:
                last_line + 1 != line:

            # highlight last codepart
            # highlight last codepart
            formatter = HtmlFormatter(linenos='inline',
            formatter = HtmlFormatter(linenos='inline',
                                      linenostart=line_code_start)
                                      linenostart=line_code_start)
@@ -288,7 +291,6 @@ def proxify(url):




def image_proxify(url):
def image_proxify(url):

    if url.startswith('//'):
    if url.startswith('//'):
        url = 'https:' + url
        url = 'https:' + url


@@ -433,29 +435,33 @@ def pre_request():
                or plugin.id in allowed_plugins):
                or plugin.id in allowed_plugins):
            request.user_plugins.append(plugin)
            request.user_plugins.append(plugin)


def config_results(results, query):
    """Prepare search results for HTML rendering.

    For every result dict: highlight the query terms inside the (escaped,
    1024-char-truncated) content and title, and attach a human-readable
    'pretty_url' derived from the result URL. Mutates the dicts in place.
    """
    for entry in results:
        body = entry.get('content')
        if body:
            # escape first, then mark the matching query terms
            entry['content'] = highlight_content(escape(body[:1024]), query)
        entry['title'] = highlight_content(escape(entry['title'] or u''), query)
        entry['pretty_url'] = prettify_url(entry['url'])


def index_error(output_format, error_message):
        # TODO, check if timezone is calculated right
    if output_format == 'json':
        if 'publishedDate' in result:
        return Response(json.dumps({'error': error_message}),
            try:  # test if publishedDate >= 1900 (datetime module bug)
                        mimetype='application/json')
                result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
    elif output_format == 'csv':
            except ValueError:
        response = Response('', mimetype='application/csv')
                result['publishedDate'] = None
        cont_disp = 'attachment;Filename=searx.csv'
            else:
        response.headers.add('Content-Disposition', cont_disp)
                if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
        return response
                    timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
    elif output_format == 'rss':
                    minutes = int((timedifference.seconds / 60) % 60)
        response_rss = render(
                    hours = int(timedifference.seconds / 60 / 60)
            'opensearch_response_rss.xml',
                    if hours == 0:
            results=[],
                        result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)
            q=request.form['q'] if 'q' in request.form else '',
            number_of_results=0,
            base_url=get_base_url(),
            error_message=error_message,
            override_theme='__common__',
        )
        return Response(response_rss, mimetype='text/xml')
                    else:
                    else:
        # html
                        result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(
                            hours=hours, minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = format_date(result['publishedDate'])

def index_error():
        request.errors.append(gettext('search error'))
        request.errors.append(gettext('search error'))
        return render(
        return render(
            'index.html',
            'index.html',
@@ -465,30 +471,19 @@ def index_error(output_format, error_message):
@app.route('/search', methods=['GET', 'POST'])
@app.route('/search', methods=['GET', 'POST'])
@app.route('/', methods=['GET', 'POST'])
@app.route('/', methods=['GET', 'POST'])
def index():
def index():
    """Render index page.

    Supported outputs: html, json, csv, rss.
    """

    # output_format
    output_format = request.form.get('format', 'html')
    if output_format not in ['html', 'csv', 'json', 'rss']:
        output_format = 'html'

    # check if there is query
    # check if there is query
    if request.form.get('q') is None:
    if request.form.get('q') is None:
        if output_format == 'html':
        return render(
        return render(
            'index.html',
            'index.html',
        )
        )
        else:
            return index_error(output_format, 'No query'), 400


    # search
    # search
    search_query = None
    search_query = None
    result_container = None
    result_container = None
    results_images = []
    try:
    try:
        search_query = get_search_query_from_webapp(request.preferences, request.form)
        search_query = get_search_query_from_webapp(request.preferences, request.form)

        # search = Search(search_query) #  without plugins
        # search = Search(search_query) #  without plugins
        search = SearchWithPlugins(search_query, request.user_plugins, request)
        search = SearchWithPlugins(search_query, request.user_plugins, request)
        result_container = search.search()
        result_container = search.search()
@@ -498,9 +493,27 @@ def index():


        # is it an invalid input parameter or something else ?
        # is it an invalid input parameter or something else ?
        if (issubclass(e.__class__, SearxParameterException)):
        if (issubclass(e.__class__, SearxParameterException)):
            return index_error(output_format, e.message), 400
            return index_error(), 400
        else:
        else:
            return index_error(output_format, gettext('search error')), 500
            return index_error(), 500

    # search images
    if search_query.categories == ['general'] and search_query.pageno == 1:
        search_images_engines = []
        disabled_engines = request.preferences.engines.get_disabled()
        for engine in categories['images']:
            if (engine.name, 'images') not in disabled_engines:
                search_images_engines.append({'category': 'images', 'name': engine.name})
        images_search_query = SearchQuery(search_query.query, search_images_engines, ['images'], search_query.lang,
                                          search_query.safesearch, 1, search_query.time_range)
        results_images_big = SearchWithPlugins(images_search_query, request.user_plugins,
                                                    request).search().get_ordered_results()
        to_ten = 0
        for image in results_images_big:
            to_ten+=1
            if to_ten > 10:
                break
            results_images.append(image)


    # results
    # results
    results = result_container.get_ordered_results()
    results = result_container.get_ordered_results()
@@ -512,70 +525,8 @@ def index():
    advanced_search = request.form.get('advanced_search', None)
    advanced_search = request.form.get('advanced_search', None)


    # output
    # output
    for result in results:
    config_results(results, search_query.query)
        if output_format == 'html':
    config_results(results_images, search_query.query)
            if 'content' in result and result['content']:
                result['content'] = highlight_content(escape(result['content'][:1024]), search_query.query)
            result['title'] = highlight_content(escape(result['title'] or u''), search_query.query)
        else:
            if result.get('content'):
                result['content'] = html_to_text(result['content']).strip()
            # removing html content and whitespace duplications
            result['title'] = ' '.join(html_to_text(result['title']).strip().split())

        result['pretty_url'] = prettify_url(result['url'])

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            try:  # test if publishedDate >= 1900 (datetime module bug)
                result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
            except ValueError:
                result['publishedDate'] = None
            else:
                if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
                    timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
                    minutes = int((timedifference.seconds / 60) % 60)
                    hours = int(timedifference.seconds / 60 / 60)
                    if hours == 0:
                        result['publishedDate'] = gettext(u'{minutes} minute(s) ago').format(minutes=minutes)
                    else:
                        result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
                else:
                    result['publishedDate'] = format_date(result['publishedDate'])

    if output_format == 'json':
        return Response(json.dumps({'query': search_query.query.decode('utf-8'),
                                    'number_of_results': number_of_results,
                                    'results': results,
                                    'answers': list(result_container.answers),
                                    'corrections': list(result_container.corrections),
                                    'infoboxes': result_container.infoboxes,
                                    'suggestions': list(result_container.suggestions),
                                    'unresponsive_engines': list(result_container.unresponsive_engines)},
                                   default=lambda item: list(item) if isinstance(item, set) else item),
                        mimetype='application/json')
    elif output_format == 'csv':
        csv = UnicodeWriter(StringIO())
        keys = ('title', 'url', 'content', 'host', 'engine', 'score')
        csv.writerow(keys)
        for row in results:
            row['host'] = row['parsed_url'].netloc
            csv.writerow([row.get(key, '') for key in keys])
        csv.stream.seek(0)
        response = Response(csv.stream.read(), mimetype='application/csv')
        cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search_query.query)
        response.headers.add('Content-Disposition', cont_disp)
        return response
    elif output_format == 'rss':
        response_rss = render(
            'opensearch_response_rss.xml',
            results=results,
            q=request.form['q'],
            number_of_results=number_of_results,
            base_url=get_base_url(),
            override_theme='__common__',
        )
        return Response(response_rss, mimetype='text/xml')


    return render(
    return render(
        'results.html',
        'results.html',
@@ -595,6 +546,7 @@ def index():
        current_language=match_language(search_query.lang,
        current_language=match_language(search_query.lang,
                                        LANGUAGE_CODES,
                                        LANGUAGE_CODES,
                                        fallback=settings['search']['language']),
                                        fallback=settings['search']['language']),
        results_image=results_images,
        base_url=get_base_url(),
        base_url=get_base_url(),
        theme=get_current_theme_name(),
        theme=get_current_theme_name(),
        favicons=global_favicons[themes.index(get_current_theme_name())]
        favicons=global_favicons[themes.index(get_current_theme_name())]