# Copyright (C) 2006 Robey Pointer <robey@lag.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
indexing of the comment text of revisions, for fast searching.

two separate 'shelve' files are created:

    - recorded: revid -> 1 (if the revid is indexed)
    - index: 3-letter substring -> list(revids)
"""

# NOTE(review): the stdlib imports below were missing from the garbled
# source but are required by the code in this module (os.path/os.mkdir,
# re.sub, shelve.open, threading.RLock, time.time) -- verify against the
# original file.
import os
import re
import shelve
import threading
import time

from loggerhead import util
from loggerhead.util import decorator

# if any substring index reaches this many revids, replace the entry with
# an ALL marker -- it's not worth an explicit index.
# NOTE(review): the constant values were missing from the garbled source;
# ALL is used as a sentinel compared with `== ALL` below -- confirm the
# original values.
ALL_THRESHOLD = 1000
ALL = 'ALL'

# decorator that serializes calls through the instance's `_lock` attribute.
with_lock = util.with_lock('_lock')
def normalize_string(s):
    """
    remove any punctuation and normalize all whitespace to a single space.
    """
    s = util.to_utf8(s).lower()
    # remove apostrophes completely.
    s = re.sub(r"'", '', s)
    # convert other garbage into space
    s = re.sub(r'[^\w\d]', ' ', s)
    # compress multiple spaces into one.
    s = re.sub(r'\s{2,}', ' ', s)
    # and finally remove leading/trailing whitespace
    # (bug fix: the garbled source was missing this final statement, so the
    # function implicitly returned None; the comment above shows the intent)
    return s.strip()
class TextIndex (object):
    """
    Substring index over revision comment text, for fast searching.

    Two shelve files under C{cache_path} back the index:

        - 'textindex-recorded': utf-8 revid -> True (revid has been indexed)
        - 'textindex': 3-letter substring -> set of revids (or the ALL
          marker once the set passes ALL_THRESHOLD)

    NOTE(review): this class was reconstructed from a garbled source in
    which several lines were dropped; every reconstructed line is marked
    below and should be verified against the original file.
    """

    def __init__(self, history, cache_path):
        self.history = history
        self.log = history.log

        if not os.path.exists(cache_path):
            # NOTE(review): reconstructed -- the source showed only the
            # existence check; presumably the directory is created here.
            os.mkdir(cache_path)

        recorded_filename = os.path.join(cache_path, 'textindex-recorded')
        index_filename = os.path.join(cache_path, 'textindex')

        # 'c' creates the shelve files on first use.
        self._recorded = shelve.open(recorded_filename, 'c', protocol=2)
        self._index = shelve.open(index_filename, 'c', protocol=2)

        # RLock so a locked method may call other locked methods.
        self._lock = threading.RLock()

        self.log.info('Using search index; %d entries.', len(self._recorded))

    @with_lock
    def is_indexed(self, revid):
        # a revid is indexed iff it was recorded in the 'recorded' shelve.
        return self._recorded.get(util.to_utf8(revid), None) is not None

    @with_lock
    def __len__(self):
        # NOTE(review): reconstructed method header -- the source showed
        # only `return len(self._recorded)` with its def line missing.
        return len(self._recorded)

    @with_lock
    def close(self):
        self._recorded.close()
        # NOTE(review): reconstructed -- presumably both shelves are closed.
        self._index.close()

    @with_lock
    def full(self):
        # the index is complete once every revision in history has been
        # recorded (and the branch tip itself is among them).
        return (len(self._recorded) >= len(self.history.get_revision_history())) and (util.to_utf8(self.history.last_revid) in self._recorded)

    @with_lock
    def index_change(self, change):
        """
        currently, only indexes the 'comment' field.
        """
        comment = normalize_string(change.comment)
        # every 3-letter substring of the comment maps back to this revid.
        # (for comments shorter than 3 chars the loop body never runs.)
        for i in xrange(len(comment) - 2):
            sub = comment[i:i + 3]
            revid_set = self._index.get(sub, None)
            if revid_set is None:
                # NOTE(review): reconstructed -- first revid for this substring.
                revid_set = set()
            elif revid_set == ALL:
                # this entry got too big
                continue
            revid_set.add(change.revid)
            if len(revid_set) > ALL_THRESHOLD:
                # NOTE(review): reconstructed -- collapse an oversized entry
                # to the ALL marker, per the module-level comment.
                revid_set = ALL
            self._index[sub] = revid_set

        self._recorded[util.to_utf8(change.revid)] = True

    @with_lock
    def find(self, text, revid_list=None):
        """
        return the list of revids whose comments contain every 3-letter
        substring of C{text}; optionally restricted to C{revid_list}.
        """
        text = normalize_string(text)
        if len(text) < 3:
            # NOTE(review): reconstructed guard -- no 3-letter substring to
            # look up, so no meaningful matches.
            return []

        total_set = None
        seen_all = False

        if revid_list is not None:
            total_set = set(revid_list)

        for i in xrange(len(text) - 2):
            # NOTE(review): reconstructed -- substring extraction mirrors
            # index_change above.
            sub = text[i:i + 3]
            revid_set = self._index.get(sub, None)
            if revid_set is None:
                # zero matches, stop here.
                return []
            if revid_set == ALL:
                # NOTE(review): reconstructed branch -- an ALL entry can't
                # narrow the result; remember that we saw one.
                seen_all = True
                continue
            if total_set is None:
                total_set = revid_set
            else:
                total_set.intersection_update(revid_set)
            if len(total_set) == 0:
                return []

        # tricky: if seen_all is True, one of the substring indices was ALL
        # (in other words, unindexed), so our results are actually a superset
        # of the exact answer.
        #
        # if we cared, we could do a direct match on the result set and cull
        # out any that aren't actually matches. for now, i'm gonna say that
        # we DON'T care, and if one of the substrings hit ALL, there's a small
        # chance that we'll give a few false positives, and we don't care.
        if total_set is None:
            # NOTE(review): reconstructed -- every substring hit ALL and no
            # revid_list was given; nothing to narrow by.
            return []
        return list(total_set)

    def check_rebuild(self, max_time=3600):
        """
        check if there are any un-indexed revisions, and if so, index them.
        but don't spend longer than C{max_time} on it.
        """
        if self.full():
            # NOTE(review): reconstructed guard -- nothing to do when the
            # index is already complete.
            return

        self.log.info('Building search index...')
        work = list(self.history.get_revision_history())
        start_time = time.time()
        last_update = time.time()
        # NOTE(review): reconstructed loop header/counter -- the source
        # showed only the loop body.
        count = 0
        for revid in work:
            if not self.is_indexed(revid):
                self.index_change(self.history.get_changes([ revid ])[0])
            count += 1
            now = time.time()
            # bug fix: the source compared against a hard-coded 3600 here,
            # silently ignoring the max_time parameter.
            if now - start_time > max_time:
                # there's no point working for hours. eventually we might even
                # hit the next re-index interval, which would suck mightily.
                self.log.info('Search indexing has worked for an hour; giving up for now.')
                return
            if now - last_update > 60:
                self.log.info('Search indexing continues: %d/%d' % (min(count, len(work)), len(work)))
                last_update = time.time()
        self.log.info('Search index completed.')