Commit c8e88aee authored by Jean-Francois Dockes's avatar Jean-Francois Dockes

uprcl: improved search

parent 9cdb9565
......@@ -18,23 +18,35 @@
#
# Object Id prefix: 0$uprcl$folders
#
# Object id inside the section.
# Container: $d<diridx> where <diridx> indexes into our directory vector.
# Item: $i<docidx> where <docidx> indexes into the docs vector.
#
# Data structure:
#
# The _dirvec vector has one entry for each directory. Each entry is a
# dictionary, mapping the names inside the directory to a pair
# (diridx,docidx), where:
# - diridx is an index into dirvec if the name is a directory, else -1
# - docidx is the index of the doc inside the doc array, or -1 if:
# - There is no doc entry, which could possibly happen if there is
# no result for an intermediary element in a path,
# The _rcldocs list has one entry for each document in the index (mime:* search)
#
# The _dirvec list has one entry for each directory. Directories are
# created as needed by splitting the paths/urls from _rcldocs (and
# possibly adding some for contentgroups). Directories have no
# direct relation with the index objects; they are identified by
# their _dirvec index.
#
# Object ids inside the section:
# Container: $d<diridx> where <diridx> indexes into _dirvec
# Item: $i<docidx> where <docidx> indexes into _rcldocs
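# Illustrative examples (hypothetical indexes): the directory stored
# at _dirvec[3] would get object id 0$uprcl$folders$d3, and the track
# at _rcldocs[157] would get 0$uprcl$folders$i157.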
#
# Each _dirvec entry is a Python dict, mapping the directory entries'
# names to a pair (diridx,docidx), where:
#
# - diridx is an index into _dirvec if the name is a directory, else -1
# - docidx is an index into _rcldocs, or -1 if:
# - There is no _rcldocs entry, which could possibly happen if
# there is no result for an intermediary element in a path,
# because of some recoll issue, or because this is a synthetic
# 'contentgroup' entry.
# - Or, while we build the structure, temporarily, if the doc was
# not yet seen. The value will then be updated when we see it.
#
# Note: docidx is usually set in the pair for a directory, but I don't
# think that it is ever used. The Recoll doc for a directory has
# nothing very interesting in it.
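# Illustrative sketch of one _dirvec entry (hypothetical names and
# indexes): a directory holding one subdirectory and one track could
# look like
#   { 'Goldberg Variations' : (12, -1), '01 - Aria.flac' : (-1, 157) }
# the subdirectory pointing to its own _dirvec slot (12), the track
# pointing to its _rcldocs slot (157). Per the note above, the pair
# for the subdirectory may carry a docidx instead of -1.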
#
# Each directory has a special ".." entry with a diridx pointing to
# the parent directory. This allows building a path from a container
......@@ -45,6 +57,14 @@
# Entry 0 in _dirvec is special: it holds the 'topdirs' from the recoll
# configuration. The entries are paths instead of simple names, and
# the docidx is 0. The diridx points to a dirvec entry.
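# Illustrative sketch (hypothetical paths and indexes): with topdirs
# set to /music and /podcasts, _dirvec[0] could look like
#   { '/music' : (1, 0), '/podcasts' : (2, 0) }
# each diridx pointing to the _dirvec entry built for that topdir.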
#
# We also build an _xid2idx xdocid->objidx map to allow a Recoll
# item search result to be connected back to the folders tree.
# I'm not sure that this is at all useful (bogus objids for items in
# search results are quite probably ok). It could quite probably
# also be done using the URL, which is what we use to build the
# folders tree in the first place.
# _xid2idx is currently deactivated (see comment)
import os
import shlex
......@@ -88,10 +108,49 @@ class Folders(object):
return len(self._dirvec) - 1
# The root entry (diridx 0) is special because its keys are the
# topdirs paths, not simple names. We find which topdir path this
# doc belongs to, then return the appropriate diridx and the split
# remainder of the path.
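# Illustrative example (hypothetical values): for a doc whose binurl
# decodes to file:///music/Bach/Goldberg/01.flac, with /music listed
# in _dirvec[0] as (1, 0) and no contentgroup field, this would
# return (1, ['Bach', 'Goldberg', '01.flac']).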
def _pathbeyondtopdirs(self, doc):
url = doc.getbinurl().decode('utf-8', errors='replace')
# Strip the 'file://' prefix to get a plain filesystem path
url = url[7:]
# Determine the root entry (topdirs element). Special because
# its path is not a simple name.
fathidx = -1
for rtpath,idx in self._dirvec[0].items():
#uplog("type(url) %s type(rtpath) %s rtpath %s url %s" %
# (type(url),type(rtpath),rtpath, url))
if url.startswith(rtpath):
fathidx = idx[0]
break
if fathidx == -1:
uplog("No parent in topdirs: %s" % url)
return None,None
# Compute rest of path. If there is none, we're not interested.
url1 = url[len(rtpath):]
if len(url1) == 0:
return None,None
# If there is a contentgroup field, just add it as a virtual
# directory in the path. This only affects the visible tree,
# not the 'real' URLs of course.
if doc.contentgroup:
a = os.path.dirname(url1)
b = os.path.basename(url1)
url1 = os.path.join(a, doc.contentgroup, b)
# Split path, then walk the vector, possibly creating
# directory entries as needed
path = url1.split('/')[1:]
return fathidx, path
# Walk the recoll docs array and split the URL paths to build the
# [folders] data structure.
def _rcl2folders(self, confdir):
self._dirvec = []
self._xid2idx = {}
start = timer()
rclconf = rclconfig.RclConfig(confdir)
......@@ -127,6 +186,11 @@ class Folders(object):
if doc.mtype not in audiomtypes:
continue
# For linking item search results to the main
# array. Deactivated for now as it does not seem to be
# needed.
#self._xid2idx[doc.xdocid] = docidx
# Possibly enrich the doc entry with a cover art uri.
arturi = docarturi(doc, self._httphp, self._pprefix)
if arturi:
......@@ -134,37 +198,10 @@ class Folders(object):
# it as a doc attribute
doc.albumarturi = arturi
url = doc.getbinurl().decode('utf-8', errors='replace')
url = url[7:]
# Determine the root entry (topdirs element). Special because
# its path is not a simple name.
fathidx = -1
for rtpath,idx in self._dirvec[0].items():
#uplog("type(url) %s type(rtpath) %s rtpath %s url %s" %
# (type(url),type(rtpath),rtpath, url))
if url.startswith(rtpath):
fathidx = idx[0]
break
if fathidx == -1:
uplog("No parent in topdirs: %s" % url)
continue
# Compute rest of path
url1 = url[len(rtpath):]
if len(url1) == 0:
fathidx, path = self._pathbeyondtopdirs(doc)
if not fathidx:
continue
# If there is a contentgroup field, just add it as a virtual
# directory in the path. This only affects the visible tree,
# not the 'real' URLs of course.
if doc.contentgroup:
a = os.path.dirname(url1)
b = os.path.basename(url1)
url1 = os.path.join(a, doc.contentgroup, b)
# Split path, then walk the vector, possibly creating
# directory entries as needed
path = url1.split('/')[1:]
#uplog("%s"%path, file=sys.stderr)
for idx in range(len(path)):
elt = path[idx]
......@@ -376,3 +413,37 @@ class Folders(object):
path += elt + "/"
return path
# Compute the object id for a doc coming out of a recoll search
def _objidforxdocid(self, doc):
if doc.xdocid not in self._xid2idx:
return None
return self._idprefix + '$i' + str(self._xid2idx[doc.xdocid])
# Only works for directories but we do not check. Caller beware.
def _objidforurl(self, doc):
fathidx, path = self._pathbeyondtopdirs(doc)
if not fathidx:
return None
for idx in range(len(path)):
elt = path[idx]
if not elt in self._dirvec[fathidx]:
uplog("objidforurl: element %s has no directory entry" % elt)
return None
# Update fathidx for next iteration
fathidx = self._dirvec[fathidx][elt][0]
return self._idprefix + '$d' + str(fathidx)
def objidfordoc(self, doc):
if doc.mtype == 'inode/directory':
id = self._objidforurl(doc)
else:
id = self._objidforxdocid(doc)
if not id:
id = self._idprefix + '$' + 'seeyoulater'
return id
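# Illustrative outputs (hypothetical indexes): a directory doc whose
# path walk ends at _dirvec entry 12 yields <idprefix>$d12, an item
# found in _xid2idx at docs index 157 yields <idprefix>$i157, and
# anything else falls back to <idprefix>$seeyoulater.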
......@@ -70,7 +70,7 @@ def _update_index():
# Wait for indexer
while not uprclindex.indexerdone():
time.sleep(.5)
fin = timer()
fin = timer()
uplog("Indexing took %.2f Seconds" % (fin - start))
folders = Folders(g_rclconfdir, g_httphp, g_pathprefix)
......@@ -155,7 +155,7 @@ def _uprcl_init_worker():
'pathprefix':g_pathprefix})
httpthread.daemon = True
httpthread.start()
_update_index()
uplog("Init done")
......
......@@ -21,7 +21,7 @@ import re
from recoll import recoll
from upmplgutils import uplog
from uprclutils import stringToStrings, rcldoctoentry, cmpentries
from uprclutils import stringToStrings, rcldoctoentry, cmpentries, upnp2rclfields
def _getchar(s, i):
if i < len(s):
......@@ -38,13 +38,12 @@ def _readword(s, i):
return j,w
# Called with '"' already read.
# Upnp search term strings are double quoted, but we should not take
# them as recoll phrases. We separate the parts which are internally
# quoted (these become phrases) from the lists of words, which we interpret as
# an and search (comma-separated). Internal quotes come backslash-escaped
# an AND search (comma-separated). Internal quotes come backslash-escaped
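# Illustrative example (hypothetical input): called on the remainder
#   hello \"one phrase\" world"
# this should return the tokens ['hello', 'one phrase', 'world'];
# 'one phrase' will later be emitted as a quoted recoll phrase, the
# single words as plain AND terms.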
def _parsestring(s, i=0):
uplog("parseString: input: <%s>" % s[i:])
#uplog("parseString: input: <%s>" % s[i:])
# First change '''"hello \"one phrase\"''' world" into
# '''hello "one phrase" world'''
# Note that we can't handle quoted dquotes inside string
......@@ -84,8 +83,20 @@ def _parsestring(s, i=0):
tokens = stringToStrings(str)
return j, tokens
def _appendterms(out, v, field, oper):
uplog("_appendterms: v %s field <%s> oper <%s>" % (v,field,oper))
def _searchClauses(out, field, oper, words, phrases):
if words:
out.append(field)
out.append(oper)
out.append(words)
for ph in phrases:
out.append(field)
out.append(oper)
out.append('"' + ph + '"')
return out
def _separatePhrasesAndWords(v):
swords = ""
phrases = []
for w in v:
......@@ -95,22 +106,57 @@ def _appendterms(out, v, field, oper):
swords += w
else:
phrases.append(w)
out.append(swords)
for ph in phrases:
out.append(field)
out.append(oper)
out.append('"' + ph + '"')
return (swords, phrases)
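# Illustrative example (hypothetical input): ['hello', 'one phrase',
# 'world'] should come back roughly as ('hello world', ['one phrase']):
# single words gathered into one string, multi-word elements kept as
# phrases (the exact word separator depends on the lines elided above).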
# The v list contains terms and phrases. Fields may be several space-
# separated field specs, which we should OR (e.g. for searching title
# or filename).
def _makeSearchExp(out, v, field, oper, neg):
uplog("_makeSearchExp: v <%s> field <%s> oper <%s> neg <%s>" %
(v, field, oper, neg))
if oper == 'I':
return
swords,phrases = _separatePhrasesAndWords(v)
if neg:
out.append(" -")
# Special-case 'title' because we want to also match directory names
# ((title:keyword) OR (filename:keyword AND mime:inode/directory))
if field == 'title':
fields = (field, 'filename')
else:
fields = (field,)
if len(fields) > 1:
out.append(" (")
for i in range(len(fields)):
field = fields[i]
out.append(" (")
_searchClauses(out, field, oper, swords, phrases)
if i == 1:
out.append(" AND mime:inode/directory")
out.append(")")
if len(fields) == 2 and i == 0:
out.append(" OR ")
if len(fields) > 1:
out.append(") ")
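# Illustrative result (hypothetical keyword): for field 'title', oper
# ':' and v ['keyword'], the appended tokens join up to roughly
#   ((title : keyword) OR (filename : keyword AND mime:inode/directory))
# matching the special case described above (whitespace approximate).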
def _upnpsearchtorecoll(s):
uplog("_upnpsearchtorecoll:in: <%s>" % s)
s = re.sub('[\t\n\r\f ]+', ' ', s)
out = []
hadDerived = False
i = 0
field = ""
oper = ""
neg = False
i = 0
while True:
i,c = _getchar(s, i)
if not c:
......@@ -134,10 +180,10 @@ def _upnpsearchtorecoll(s):
else:
if c == '"':
i,v = _parsestring(s, i)
uplog("_parsestring ret: %s" % v)
_appendterms(out, v, field, oper)
oper = ""
_makeSearchExp(out, v, field, oper, neg)
field = ""
oper = ""
neg = False
continue
else:
i -= 1
......@@ -145,26 +191,19 @@ def _upnpsearchtorecoll(s):
#uplog("_readword returned <%s>" % w)
if w == 'contains':
out.append(':')
oper = ':'
elif w == 'doesNotContain':
if len(out) < 1:
raise Exception("doesNotContain can't be the first word")
out.insert(-1, "-")
out.append(':')
oper = ':'
elif w == 'derivedFrom':
hadDerived = True
out.append(':')
oper = ':'
elif w == 'true':
out.append('*')
oper = ""
elif w == 'false':
out.append('xxxjanzocsduochterrrrm')
elif w == 'exists':
out.append(':')
neg = True
oper = ':'
elif w == 'derivedFrom' or w == 'exists':
# upnp:class derivedfrom "object.container.album"
# exists??
# can't use this, will be ignored
oper = 'I'
elif w == 'true' or w == 'false':
# Don't know what to do with this. Just ignore it,
# by not calling makeSearchExp.
pass
elif w == 'and':
# Recoll has implied AND, but see next
pass
......@@ -177,16 +216,11 @@ def _upnpsearchtorecoll(s):
else:
try:
field = upnp2rclfields[w]
except:
field = w
out.append(field)
oper = ""
except Exception as ex:
#uplog("Field translation error: %s"%ex)
field = (w,)
ostr = ""
for tok in out:
ostr += tok + " "
uplog("_upnpsearchtorecoll:out: <%s>" % ostr)
return ostr
return " ".join(out)
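# Illustrative translation (hypothetical criterion): an input like
#   upnp:artist contains "Bach"
# should come out roughly as '(artist : Bach)' (whitespace
# approximate); a doesNotContain criterion sets neg and prepends '-'
# to the parenthesized group.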
def search(foldersobj, rclconfdir, objid, upnps, idprefix, httphp, pathprefix):
......@@ -215,7 +249,7 @@ def search(foldersobj, rclconfdir, objid, upnps, idprefix, httphp, pathprefix):
while True:
docs = rclq.fetchmany()
for doc in docs:
id = idprefix + '$' + 'seeyoulater'
id = foldersobj.objidfordoc(doc)
e = rcldoctoentry(id, objid, httphp, pathprefix, doc)
if e:
entries.append(e)
......
......@@ -57,7 +57,7 @@ audiomtypes = frozenset([
# rclaudio and the Recoll configuration 'fields' file, and what
# plgwithslave.cxx expects, which is less than consistent.
upnp2rclfields = {
'upnp:album': 'album',
'upnp:album' : 'album',
'upnp:artist' : 'artist',
'comment' : 'comment',
'composer' : 'composer',
......@@ -72,7 +72,8 @@ upnp2rclfields = {
'res:samplefreq' : 'sample_rate',
'res:size' : 'fbytes',
'tt' : 'title',
'upnp:originalTrackNumber' : 'tracknumber',
'dc:title' : 'title',
'upnp:originalTrackNumber' : 'tracknumber'
}
def _httpurl(httphp, path, query=''):
......@@ -80,7 +81,7 @@ def _httpurl(httphp, path, query=''):
def rcldoctoentry(id, pid, httphp, pathprefix, doc):
"""
Transform a Doc objects into the format expected by the parent
Transform a Doc object into the format expected by the parent
Args:
id (str): objid for the entry
......@@ -279,20 +280,24 @@ def _cmpentries_func(e1, e2):
isct2 = tp2 == 'ct'
# Containers come before items, and are sorted in alphabetic order
ret = -2
if isct1 and not isct2:
return 1
ret = -1
elif not isct1 and isct2:
return -1
ret = 1
elif isct1 and isct2:
tt1 = e1['tt']
tt2 = e2['tt']
if tt1.lower() < tt2.lower():
return -1
ret = -1
elif tt1.lower() > tt2.lower():
return 1
ret = 1
else:
return 0
ret = 0
if ret != -2:
#uplog("cmpentries tp1 %s tp2 %s, returning %d"%(tp1,tp2,ret))
return ret
# Tracks. Sort by album then directory then track number
k = 'upnp:album'
a1 = e1[k] if k in e1 else ""
......