Commit f5eb2d52 authored by Jean-Francois Dockes

Process all data as str, not utf-8 bytes

parent f889b57c
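The commit follows the usual Python 3 convention: keep document fields as str internally and use '' rather than b'' for missing values, encoding to bytes only where an API requires it. A minimal sketch of that pattern, with a made-up field list and a stand-in document object rather than the real recoll API:

    FIELDS = ('url', 'title', 'mtime')           # illustrative subset, not the real list

    class FakeDoc:                               # stand-in for a recoll result document
        url = 'file:///home/user/notes.txt'
        title = 'Notes'
        mtime = None

    d = {}
    for f in FIELDS:
        v = getattr(FakeDoc, f)
        # Keep values as str; missing fields become '' instead of b''.
        d[f] = v if v is not None else ''
    print(d)   # {'url': 'file:///home/user/notes.txt', 'title': 'Notes', 'mtime': ''}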
@@ -2,9 +2,9 @@
 <div class="search-result">
 %number = (query['page'] - 1)*config['perpage'] + i + 1
 <div class="search-result-number"><a href="#r{{d['sha']}}">#{{number}}</a></div>
-%url = d['url'].replace(b'file://', b'')
+%url = d['url'].replace('file://', '')
 %for dr, prefix in config['mounts'].items():
-%url = url.replace(dr.encode('utf-8'), prefix.encode('utf-8'))
+%url = url.replace(dr, prefix)
 %end
 <div class="search-result-title" id="r{{d['sha']}}" title="{{d['abstract']}}">
 %if 'title_link' in config and config['title_link'] != 'download':
@@ -24,9 +24,9 @@
 <div class="search-result-author">{{d['author']}}</div>
 %end
 <div class="search-result-url">
-%urllabel = os.path.dirname(d['url'].replace(b'file://', b''))
+%urllabel = os.path.dirname(d['url'].replace('file://', ''))
 %for r in config['dirs']:
-%urllabel = urllabel.replace(r.encode('utf-8').rsplit(b'/',1)[0] + b'/' , b'')
+%urllabel = urllabel.replace(r.rsplit('/',1)[0] + '/' , '')
 %end
 <a href="{{os.path.dirname(url)}}">{{urllabel}}</a>
 </div>
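In the template, once d['url'] is a str, str.replace() must be called with str arguments, so the file:// prefix and the mount/dir patterns no longer need to be encoded. A small illustration with a placeholder URL:

    url = 'file:///media/data/doc.txt'           # placeholder value

    # str.replace() with str arguments works as before...
    print(url.replace('file://', ''))            # /media/data/doc.txt

    # ...while mixing in bytes patterns would raise a TypeError:
    # url.replace(b'file://', b'')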
@@ -262,18 +262,18 @@ def recoll_search(q):
         for f in FIELDS:
             v = getattr(doc, f)
             if v is not None:
-                d[f] = v.encode('utf-8')
+                d[f] = v
             else:
-                d[f] = b''
+                d[f] = ''
         d['label'] = select([d['title'], d['filename'], '?'], [None, ''])
-        d['sha'] = hashlib.sha1(d['url']+d['ipath']).hexdigest().encode('utf-8')
-        d['time'] = timestr(d['mtime'], config['timefmt']).encode('utf-8')
+        d['sha'] = hashlib.sha1((d['url']+d['ipath']).encode('utf-8')).hexdigest()
+        d['time'] = timestr(d['mtime'], config['timefmt'])
         if 'snippets' in q and q['snippets']:
             if 'highlight' in q and q['highlight']:
                 d['snippet'] = query.makedocabstract(
-                    doc, highlighter).encode('utf-8')
+                    doc, highlighter)
             else:
-                d['snippet'] = query.makedocabstract(doc).encode('utf-8')
+                d['snippet'] = query.makedocabstract(doc)
         #for n,v in d.items():
         #    print("type(%s) is %s" % (n,type(v)))
         results.append(d)
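The one place where bytes are still needed is the hash: in Python 3, hashlib.sha1() rejects str input, while hexdigest() already returns a str, so the concatenated url+ipath is encoded once on the way in and nothing needs re-encoding on the way out. A short illustration with placeholder values:

    import hashlib

    url, ipath = 'file:///tmp/a.pdf', 'subdoc1'   # placeholder values

    # sha1() needs a bytes-like object, so encode the concatenation explicitly.
    sha = hashlib.sha1((url + ipath).encode('utf-8')).hexdigest()
    print(type(sha), sha)                         # <class 'str'> and a 40-char hex digest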
@@ -374,7 +374,7 @@ def get_json():
     for d in res:
         ud={}
         for f,v in d.items():
-            ud[f] = v.decode('utf-8')
+            ud[f] = v
         ures.append(ud)
     res = ures
     return json.dumps({ 'query': query, 'results': res })
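json.dumps() serializes str values natively but refuses bytes, so with the result fields already being str the per-field decode loop reduces to a plain copy. For example:

    import json

    result = {'title': 'Notes', 'url': 'file:///tmp/a.pdf'}   # all values are str
    print(json.dumps({'results': [result]}))

    # With bytes values the same call would fail:
    # json.dumps({'title': b'Notes'})   # TypeError: Object of type bytes is not JSON serializable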
@@ -401,7 +401,7 @@ def get_csv():
         row = []
         for f in fields:
             if f in doc:
-                row.append(doc[f].decode('utf-8'))
+                row.append(doc[f])
             else:
                 row.append('')
         cw.writerow(row)
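The csv writer likewise expects str cells; a bytes value would be stringified to its repr (e.g. "b'Notes'") in the output, which is why the fields are kept as str before they reach writerow(). A minimal example using an in-memory buffer:

    import csv, io

    buf = io.StringIO()
    cw = csv.writer(buf)

    # str cells are written as expected.
    cw.writerow(['Notes', 'file:///tmp/a.pdf'])
    print(buf.getvalue(), end='')                 # Notes,file:///tmp/a.pdf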