From: William Morgan
Date: Tue, 8 Sep 2009 19:58:28 +0000 (-0400)
Subject: Merge branch 'custom-search-hook'
X-Git-Url: https://git.cworth.org/git?a=commitdiff_plain;h=8903cdedc810b5570b5d2cfb35d60683782aa84a;p=sup

Merge branch 'custom-search-hook'

Conflicts:
	lib/sup/hook.rb
	lib/sup/index.rb
---

8903cdedc810b5570b5d2cfb35d60683782aa84a
diff --cc lib/sup/ferret_index.rb
index df1139d,0000000..d605e8d
mode 100644,000000..100644
--- a/lib/sup/ferret_index.rb
+++ b/lib/sup/ferret_index.rb
@@@ -1,463 -1,0 +1,471 @@@
 +require 'ferret'
 +
 +module Redwood
 +
 +class FerretIndex < BaseIndex
 +
++  HookManager.register "custom-search", <<EOS
++Executes before a string search is applied to the index,
++returning a new search string.
++Variables:
++  subs: The string being searched.
++EOS
 +
 +  def initialize dir
 +    super
 +
 +    @index_mutex = Monitor.new
 +
 +    wsa = Ferret::Analysis::WhiteSpaceAnalyzer.new false
 +    sa = Ferret::Analysis::StandardAnalyzer.new [], true
 +    @analyzer = Ferret::Analysis::PerFieldAnalyzer.new wsa
 +    @analyzer[:body] = sa
 +    @analyzer[:subject] = sa
 +    @qparser ||= Ferret::QueryParser.new :default_field => :body, :analyzer => @analyzer, :or_default => false
 +  end
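The hook registered above is the point of this merge: parse_query (below) runs it on the raw search string before any other rewriting. A minimal sketch of what a user's hook file might look like; the path follows sup's usual hook layout, and the shorthand it expands is invented for illustration:

  # ~/.sup/hooks/custom-search.rb (hypothetical example)
  # The hook body sees the raw search string as `subs', and its return
  # value replaces it. Callers fall back to the original string when a
  # hook returns nil, since they invoke it as
  #   HookManager.run("custom-search", :subs => s) || s
  # Here we expand a made-up "ml:foo" shorthand into a real query term.
  subs.gsub(/\bml:(\S+)\b/) do
    "to:#{$1}@lists.example.com"
  end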
 +
 +  def load_index dir=File.join(@dir, "ferret")
 +    if File.exists? dir
 +      debug "loading index..."
 +      @index_mutex.synchronize do
 +        @index = Ferret::Index::Index.new(:path => dir, :analyzer => @analyzer, :id_field => 'message_id')
 +        debug "loaded index of #{@index.size} messages"
 +      end
 +    else
 +      debug "creating index..."
 +      @index_mutex.synchronize do
 +        field_infos = Ferret::Index::FieldInfos.new :store => :yes
 +        field_infos.add_field :message_id, :index => :untokenized
 +        field_infos.add_field :source_id
 +        field_infos.add_field :source_info
 +        field_infos.add_field :date, :index => :untokenized
 +        field_infos.add_field :body
 +        field_infos.add_field :label
 +        field_infos.add_field :attachments
 +        field_infos.add_field :subject
 +        field_infos.add_field :from
 +        field_infos.add_field :to
 +        field_infos.add_field :refs
 +        field_infos.add_field :snippet, :index => :no, :term_vector => :no
 +        field_infos.create_index dir
 +        @index = Ferret::Index::Index.new(:path => dir, :analyzer => @analyzer, :id_field => 'message_id')
 +      end
 +    end
 +  end
 +
 +  def add_message m; sync_message m end
 +  def update_message m; sync_message m end
 +  def update_message_state m; sync_message m end
 +
 +  def sync_message m, opts={}
 +    entry = @index[m.id]
 +
 +    raise "no source info for message #{m.id}" unless m.source && m.source_info
 +
 +    source_id = if m.source.is_a? Integer
 +      m.source
 +    else
 +      m.source.id or raise "unregistered source #{m.source} (id #{m.source.id.inspect})"
 +    end
 +
 +    snippet = if m.snippet_contains_encrypted_content? && $config[:discard_snippets_from_encrypted_messages]
 +      ""
 +    else
 +      m.snippet
 +    end
 +
 +    ## write the new document to the index. if the entry already exists in the
 +    ## index, reuse it (which avoids having to reload the entry from the source,
 +    ## which can be quite expensive for e.g. large threads of IMAP actions.)
 +    ##
 +    ## exception: if the index entry belongs to an earlier version of the
 +    ## message, use everything from the new message instead, but union the
 +    ## flags. this allows messages sent to mailing lists to have their header
 +    ## updated and to have flags set properly.
 +    ##
 +    ## minor hack: messages in sources with lower ids have priority over
 +    ## messages in sources with higher ids. so messages in the inbox will
 +    ## override everyone, and messages in the sent box will be overridden
 +    ## by everyone else.
 +    ##
 +    ## written in this manner to support previous versions of the index which
 +    ## did not keep around the entry body. upgrading is thus seamless.
 +    entry ||= {}
 +    labels = m.labels # override because this is the new state, unless...
 +
 +    ## if we are a later version of a message, ignore what's in the index,
 +    ## but merge in the labels.
 +    if entry[:source_id] && entry[:source_info] && entry[:label] &&
 +      ((entry[:source_id].to_i > source_id) || (entry[:source_info].to_i < m.source_info))
 +      labels += entry[:label].to_set_of_symbols
 +      #debug "found updated version of message #{m.id}: #{m.subj}"
 +      #debug "previous version was at #{entry[:source_id].inspect}:#{entry[:source_info].inspect}, this version at #{source_id.inspect}:#{m.source_info.inspect}"
 +      #debug "merged labels are #{labels.inspect} (index #{entry[:label].inspect}, message #{m.labels.inspect})"
 +      entry = {}
 +    end
 +
 +    ## if force_overwrite is true, ignore what's in the index. this is used
 +    ## primarily by sup-sync to force index updates.
 +    entry = {} if opts[:force_overwrite]
 +
 +    d = {
 +      :message_id => m.id,
 +      :source_id => source_id,
 +      :source_info => m.source_info,
 +      :date => (entry[:date] || m.date.to_indexable_s),
 +      :body => (entry[:body] || m.indexable_content),
 +      :snippet => snippet, # always override
 +      :label => labels.to_a.join(" "),
 +      :attachments => (entry[:attachments] || m.attachments.uniq.join(" ")),
 +
 +      ## always override :from and :to.
 +      ## older versions of Sup would often store the wrong thing in the index
 +      ## (because they were canonicalizing email addresses, resulting in the
 +      ## wrong name associated with each.) the correct address is read from
 +      ## the original header when these messages are opened in thread-view-mode,
 +      ## so this allows people to forcibly update the address in the index by
 +      ## marking those threads for saving.
 +      :from => (m.from ? m.from.indexable_content : ""),
 +      :to => (m.to + m.cc + m.bcc).map { |x| x.indexable_content }.join(" "),
 +
 +      :subject => (entry[:subject] || wrap_subj(Message.normalize_subj(m.subj))),
 +      :refs => (entry[:refs] || (m.refs + m.replytos).uniq.join(" ")),
 +    }
 +
 +    @index_mutex.synchronize do
 +      @index.delete m.id
 +      @index.add_document d
 +    end
 +  end
 +  private :sync_message
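To make the update rule above concrete: if the copy already in the index came from a lower-priority source (higher id) or an earlier offset, its labels are unioned into the new message's labels and the rest of the stale entry is discarded. A toy illustration with invented values:

  # Hypothetical walk-through of sync_message's merge rule:
  #   index entry: source_id 2 (sent box), label "inbox unread"
  #   new message: source_id 1 (inbox),    labels Set[:inbox]
  # entry[:source_id].to_i > source_id, so the entry is stale:
  #   labels = Set[:inbox] + "inbox unread".to_set_of_symbols
  #          = Set[:inbox, :unread]
  #   entry  = {}   # every other field is taken from the new message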
 +
 +  def save_index fn=File.join(@dir, "ferret")
 +    # don't have to do anything, apparently
 +  end
 +
 +  def contains_id? id
 +    @index_mutex.synchronize { @index.search(Ferret::Search::TermQuery.new(:message_id, id)).total_hits > 0 }
 +  end
 +
 +  def size
 +    @index_mutex.synchronize { @index.size }
 +  end
 +
 +  EACH_BY_DATE_NUM = 100
 +  def each_id_by_date query={}
 +    return if empty? # otherwise ferret barfs ###TODO: remove this once my ferret patch is accepted
 +    ferret_query = build_ferret_query query
 +    offset = 0
 +    while true
 +      limit = (query[:limit])? [EACH_BY_DATE_NUM, query[:limit] - offset].min : EACH_BY_DATE_NUM
 +      results = @index_mutex.synchronize { @index.search ferret_query, :sort => "date DESC", :limit => limit, :offset => offset }
 +      debug "got #{results.total_hits} results for query (offset #{offset}) #{ferret_query.inspect}"
 +      results.hits.each do |hit|
 +        yield @index_mutex.synchronize { @index[hit.doc][:message_id] }, lambda { build_message hit.doc }
 +      end
 +      break if query[:limit] and offset >= query[:limit] - limit
 +      break if offset >= results.total_hits - limit
 +      offset += limit
 +    end
 +  end
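A sketch of how a caller would drive the paging enumerator above; the query hash comes from parse_query, defined later in this file, and the variable names are illustrative:

  # Hypothetical caller: stream search results newest-first.
  query = index.parse_query "label:inbox limit:50"
  index.each_id_by_date(query) do |mid, builder|
    m = builder.call   # the lambda lazily loads the full message
    puts "#{m.date}  #{m.subj}"
  end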
 +
 +  def num_results_for query={}
 +    return 0 if empty? # otherwise ferret barfs ###TODO: remove this once my ferret patch is accepted
 +    ferret_query = build_ferret_query query
 +    @index_mutex.synchronize { @index.search(ferret_query, :limit => 1).total_hits }
 +  end
 +
 +  SAME_SUBJECT_DATE_LIMIT = 7
 +  MAX_CLAUSES = 1000
 +  def each_message_in_thread_for m, opts={}
 +    #debug "Building thread for #{m.id}: #{m.subj}"
 +    messages = {}
 +    searched = {}
 +    num_queries = 0
 +
 +    pending = [m.id]
 +    if $config[:thread_by_subject] # do subject queries
 +      date_min = m.date - (SAME_SUBJECT_DATE_LIMIT * 12 * 3600)
 +      date_max = m.date + (SAME_SUBJECT_DATE_LIMIT * 12 * 3600)
 +
 +      q = Ferret::Search::BooleanQuery.new true
 +      sq = Ferret::Search::PhraseQuery.new(:subject)
 +      wrap_subj(Message.normalize_subj(m.subj)).split.each do |t|
 +        sq.add_term t
 +      end
 +      q.add_query sq, :must
 +      q.add_query Ferret::Search::RangeQuery.new(:date, :>= => date_min.to_indexable_s, :<= => date_max.to_indexable_s), :must
 +
 +      q = build_ferret_query :qobj => q
 +
 +      p1 = @index_mutex.synchronize { @index.search(q).hits.map { |hit| @index[hit.doc][:message_id] } }
 +      debug "found #{p1.size} results for subject query #{q}"
 +
 +      p2 = @index_mutex.synchronize { @index.search(q.to_s, :limit => :all).hits.map { |hit| @index[hit.doc][:message_id] } }
 +      debug "found #{p2.size} results in string form"
 +
 +      pending = (pending + p1 + p2).uniq
 +    end
 +
 +    until pending.empty? || (opts[:limit] && messages.size >= opts[:limit])
 +      q = Ferret::Search::BooleanQuery.new true
 +      # this disappeared in newer ferrets... wtf.
 +      # q.max_clause_count = 2048
 +
 +      lim = [MAX_CLAUSES / 2, pending.length].min
 +      pending[0 ... lim].each do |id|
 +        searched[id] = true
 +        q.add_query Ferret::Search::TermQuery.new(:message_id, id), :should
 +        q.add_query Ferret::Search::TermQuery.new(:refs, id), :should
 +      end
 +      pending = pending[lim .. -1]
 +
 +      q = build_ferret_query :qobj => q
 +
 +      num_queries += 1
 +      killed = false
 +      @index_mutex.synchronize do
 +        @index.search_each(q, :limit => :all) do |docid, score|
 +          break if opts[:limit] && messages.size >= opts[:limit]
 +          if @index[docid][:label].split(/\s+/).include?("killed") && opts[:skip_killed]
 +            killed = true
 +            break
 +          end
 +          mid = @index[docid][:message_id]
 +          unless messages.member?(mid)
 +            #debug "got #{mid} as a child of #{id}"
 +            messages[mid] ||= lambda { build_message docid }
 +            refs = @index[docid][:refs].split
 +            pending += refs.select { |id| !searched[id] }
 +          end
 +        end
 +      end
 +    end
 +
 +    if killed
 +      #debug "thread for #{m.id} is killed, ignoring"
 +      false
 +    else
 +      #debug "ran #{num_queries} queries to build thread of #{messages.size} messages for #{m.id}: #{m.subj}" if num_queries > 0
 +      messages.each { |mid, builder| yield mid, builder }
 +      true
 +    end
 +  end
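Usage-wise, the method above yields lazy builders just like each_id_by_date, and returns false when it bails out on a killed thread. A hedged sketch of a caller:

  # Hypothetical caller of the thread expansion above:
  ok = index.each_message_in_thread_for m, :limit => 100, :skip_killed => true do |mid, builder|
    thread_message = builder.call
    puts "#{mid}: #{thread_message.subj}"
  end
  puts "thread is killed" unless ok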
 +
 +  ## builds a message object from a ferret result
 +  def build_message docid
 +    @index_mutex.synchronize do
 +      doc = @index[docid] or return
 +
 +      source = SourceManager[doc[:source_id].to_i]
 +      raise "invalid source #{doc[:source_id]}" unless source
 +
 +      #puts "building message #{doc[:message_id]} (#{source}##{doc[:source_info]})"
 +
 +      fake_header = {
 +        "date" => Time.at(doc[:date].to_i),
 +        "subject" => unwrap_subj(doc[:subject]),
 +        "from" => doc[:from],
 +        "to" => doc[:to].split.join(", "), # reformat
 +        "message-id" => doc[:message_id],
 +        "references" => doc[:refs].split.map { |x| "<#{x}>" }.join(" "),
 +      }
 +
 +      m = Message.new :source => source, :source_info => doc[:source_info].to_i,
 +                      :labels => doc[:label].to_set_of_symbols,
 +                      :snippet => doc[:snippet]
 +      m.parse_header fake_header
 +      m
 +    end
 +  end
 +
 +  def delete id
 +    @index_mutex.synchronize { @index.delete id }
 +  end
 +
 +  def load_contacts emails, h={}
 +    q = Ferret::Search::BooleanQuery.new true
 +    emails.each do |e|
 +      qq = Ferret::Search::BooleanQuery.new true
 +      qq.add_query Ferret::Search::TermQuery.new(:from, e), :should
 +      qq.add_query Ferret::Search::TermQuery.new(:to, e), :should
 +      q.add_query qq
 +    end
 +    q.add_query Ferret::Search::TermQuery.new(:label, "spam"), :must_not
 +
 +    debug "contact search: #{q}"
 +    contacts = {}
 +    num = h[:num] || 20
 +    @index_mutex.synchronize do
 +      @index.search_each q, :sort => "date DESC", :limit => :all do |docid, score|
 +        break if contacts.size >= num
 +        #debug "got message #{docid} to: #{@index[docid][:to].inspect} and from: #{@index[docid][:from].inspect}"
 +        f = @index[docid][:from]
 +        t = @index[docid][:to]
 +
 +        if AccountManager.is_account_email? f
 +          t.split(" ").each { |e| contacts[Person.from_address(e)] = true }
 +        else
 +          contacts[Person.from_address(f)] = true
 +        end
 +      end
 +    end
 +
 +    contacts.keys.compact
 +  end
 +
 +  def each_id query={}
 +    ferret_query = build_ferret_query query
 +    results = @index_mutex.synchronize { @index.search ferret_query, :limit => (query[:limit] || :all) }
 +    results.hits.map { |hit| yield @index[hit.doc][:message_id] }
 +  end
 +
 +  def optimize
 +    @index_mutex.synchronize { @index.optimize }
 +  end
 +
 +  def source_for_id id
 +    entry = @index[id]
 +    return unless entry
 +    entry[:source_id].to_i
 +  end
 +
 +  class ParseError < StandardError; end
 +
 +  ## parse a query string from the user. returns a query object
 +  ## that can be passed to any index method with a 'query'
 +  ## argument, as well as build_ferret_query.
 +  ##
 +  ## raises a ParseError if something went wrong.
 +  def parse_query s
 +    query = {}
 +
++    subs = HookManager.run("custom-search", :subs => s) || s
 +    subs = subs.gsub(/\b(to|from):(\S+)\b/) do
 +      field, name = $1, $2
 +      if(p = ContactManager.contact_for(name))
 +        [field, p.email]
 +      elsif name == "me"
 +        [field, "(" + AccountManager.user_emails.join("||") + ")"]
 +      else
 +        [field, name]
 +      end.join(":")
 +    end
 +
 +    ## if we see a label:deleted or a label:spam term anywhere in the query
 +    ## string, we set the extra load_spam or load_deleted options to true.
 +    ## bizarre? well, because the query allows arbitrary parenthesized boolean
 +    ## expressions, without fully parsing the query, we can't tell whether
 +    ## the user is explicitly directing us to search spam messages or not.
 +    ## e.g. if the string is -(-(-(-(-label:spam)))), does the user want to
 +    ## search spam messages or not?
 +    ##
 +    ## so, we rely on the fact that turning these extra options ON turns OFF
 +    ## the adding of "-label:deleted" or "-label:spam" terms at the very
 +    ## final stage of query processing. if the user wants to search spam
 +    ## messages, not adding that is the right thing; if he doesn't want to
 +    ## search spam messages, then not adding it won't have any effect.
 +    query[:load_spam] = true if subs =~ /\blabel:spam\b/
 +    query[:load_deleted] = true if subs =~ /\blabel:deleted\b/
 +
 +    ## gmail style "is" operator
 +    subs = subs.gsub(/\b(is|has):(\S+)\b/) do
 +      field, label = $1, $2
 +      case label
 +      when "read"
 +        "-label:unread"
 +      when "spam"
 +        query[:load_spam] = true
 +        "label:spam"
 +      when "deleted"
 +        query[:load_deleted] = true
 +        "label:deleted"
 +      else
 +        "label:#{$2}"
 +      end
 +    end
 +
 +    ## gmail style attachments "filename" and "filetype" searches
 +    subs = subs.gsub(/\b(filename|filetype):(\((.+?)\)\B|(\S+)\b)/) do
 +      field, name = $1, ($3 || $4)
 +      case field
 +      when "filename"
 +        debug "filename: translated #{field}:#{name} to attachments:(#{name.downcase})"
 +        "attachments:(#{name.downcase})"
 +      when "filetype"
 +        debug "filetype: translated #{field}:#{name} to attachments:(*.#{name.downcase})"
 +        "attachments:(*.#{name.downcase})"
 +      end
 +    end
 +
 +    if $have_chronic
 +      subs = subs.gsub(/\b(before|on|in|during|after):(\((.+?)\)\B|(\S+)\b)/) do
 +        field, datestr = $1, ($3 || $4)
 +        realdate = Chronic.parse datestr, :guess => false, :context => :past
 +        if realdate
 +          case field
 +          when "after"
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate.end}"
 +            "date:(>= #{sprintf "%012d", realdate.end.to_i})"
 +          when "before"
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate.begin}"
 +            "date:(<= #{sprintf "%012d", realdate.begin.to_i})"
 +          else
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate}"
 +            "date:(<= #{sprintf "%012d", realdate.end.to_i}) date:(>= #{sprintf "%012d", realdate.begin.to_i})"
 +          end
 +        else
 +          raise ParseError, "can't understand date #{datestr.inspect}"
 +        end
 +      end
 +    end
 +
 +    ## limit:42 restricts the search to 42 results
 +    subs = subs.gsub(/\blimit:(\S+)\b/) do
 +      lim = $1
 +      if lim =~ /^\d+$/
 +        query[:limit] = lim.to_i
 +        ''
 +      else
 +        raise ParseError, "non-numeric limit #{lim.inspect}"
 +      end
 +    end
 +
 +    begin
 +      query[:qobj] = @qparser.parse(subs)
 +      query[:text] = s
 +      query
 +    rescue Ferret::QueryParser::QueryParseException => e
 +      raise ParseError, e.message
 +    end
 +  end
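Tracing a sample string through the rewriting passes above (ferret back end; the addresses are invented):

  # Hypothetical transcript of parse_query's rewrites:
  #   input:          "from:me is:read filetype:pdf limit:20"
  #   to/from pass:   from:(alice@example.com||alice@work.example.com) ...
  #   is/has pass:    ... -label:unread ...
  #   filetype pass:  ... attachments:(*.pdf) ...
  #   limit pass:     query[:limit] = 20, term removed from the string
  # leaving subs = "from:(alice@example.com||alice@work.example.com) -label:unread attachments:(*.pdf)"
  # which is finally handed to @qparser.parse.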
 +
 +private
 +
 +  def build_ferret_query query
 +    q = Ferret::Search::BooleanQuery.new
 +    q.add_query Ferret::Search::MatchAllQuery.new, :must
 +    q.add_query query[:qobj], :must if query[:qobj]
 +    labels = ([query[:label]] + (query[:labels] || [])).compact
 +    labels.each { |t| q.add_query Ferret::Search::TermQuery.new("label", t.to_s), :must }
 +    if query[:participants]
 +      q2 = Ferret::Search::BooleanQuery.new
 +      query[:participants].each do |p|
 +        q2.add_query Ferret::Search::TermQuery.new("from", p.email), :should
 +        q2.add_query Ferret::Search::TermQuery.new("to", p.email), :should
 +      end
 +      q.add_query q2, :must
 +    end
 +
 +    q.add_query Ferret::Search::TermQuery.new("label", "spam"), :must_not unless query[:load_spam] || labels.include?(:spam)
 +    q.add_query Ferret::Search::TermQuery.new("label", "deleted"), :must_not unless query[:load_deleted] || labels.include?(:deleted)
 +    q.add_query Ferret::Search::TermQuery.new("label", "killed"), :must_not if query[:skip_killed]
 +
 +    q.add_query Ferret::Search::TermQuery.new("source_id", query[:source_id]), :must if query[:source_id]
 +    q
 +  end
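Roughly, the boolean query assembled above looks like this for a representative options hash (a sketch; the rendering follows Ferret's usual +/- clause notation):

  # Hypothetical shape of build_ferret_query's output for
  #   { :label => :inbox, :skip_killed => true }:
  #   +*  (MatchAllQuery)
  #   +label:inbox
  #   -label:spam -label:deleted -label:killed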
 +
 +  def wrap_subj subj; "__START_SUBJECT__ #{subj} __END_SUBJECT__"; end
 +  def unwrap_subj subj; subj =~ /__START_SUBJECT__ (.*?) __END_SUBJECT__/ && $1; end
 +end
 +
 +end
diff --cc lib/sup/xapian_index.rb
index 1395601,0000000..ab25ea0
mode 100644,000000..100644
--- a/lib/sup/xapian_index.rb
+++ b/lib/sup/xapian_index.rb
@@@ -1,558 -1,0 +1,566 @@@
 +require 'xapian'
 +require 'set'
 +
 +module Redwood
 +
 +# This index implementation uses Xapian for both searching and storage. It
 +# tends to be slightly faster than Ferret for indexing and significantly faster
 +# for searching due to precomputing thread membership.
 +class XapianIndex < BaseIndex
 +  STEM_LANGUAGE = "english"
 +  INDEX_VERSION = '1'
 +
 +  ## dates are converted to integers for xapian, and are used for document ids,
 +  ## so we must ensure they're reasonably valid. this typically only affects
 +  ## spam.
 +  MIN_DATE = Time.at 0
 +  MAX_DATE = Time.at(2**31-1)
 +
++  HookManager.register "custom-search", <<EOS
++Executes before a string search is applied to the index,
++returning a new search string.
++Variables:
++  subs: The string being searched.
++EOS
 +
 +  def build_message id
 +    entry = synchronize { get_entry id }
 +    return unless entry
 +
 +    source = SourceManager[entry[:source_id]]
 +    raise "invalid source #{entry[:source_id]}" unless source
 +
 +    m = Message.new :source => source, :source_info => entry[:source_info],
 +                    :labels => entry[:labels], :snippet => entry[:snippet]
 +
 +    mk_person = lambda { |x| Person.new(*x.reverse!) }
 +    entry[:from] = mk_person[entry[:from]]
 +    entry[:to].map!(&mk_person)
 +    entry[:cc].map!(&mk_person)
 +    entry[:bcc].map!(&mk_person)
 +
 +    m.load_from_index! entry
 +    m
 +  end
 +
 +  def add_message m; sync_message m end
 +  def update_message m; sync_message m end
 +  def update_message_state m; sync_message m end
 +
 +  def sync_message m, opts={}
 +    entry = synchronize { get_entry m.id }
 +    snippet = m.snippet
 +    entry ||= {}
 +    labels = m.labels
 +    entry = {} if opts[:force_overwrite]
 +
 +    d = {
 +      :message_id => m.id,
 +      :source_id => m.source.id,
 +      :source_info => m.source_info,
 +      :date => (entry[:date] || m.date),
 +      :snippet => snippet,
 +      :labels => labels,
 +      :from => (entry[:from] || [m.from.email, m.from.name]),
 +      :to => (entry[:to] || m.to.map { |p| [p.email, p.name] }),
 +      :cc => (entry[:cc] || m.cc.map { |p| [p.email, p.name] }),
 +      :bcc => (entry[:bcc] || m.bcc.map { |p| [p.email, p.name] }),
 +      :subject => m.subj,
 +      :refs => (entry[:refs] || m.refs),
 +      :replytos => (entry[:replytos] || m.replytos),
 +    }
 +
 +    labels.each { |l| LabelManager << l }
 +
 +    synchronize do
 +      index_message m, d, opts
 +    end
 +    true
 +  end
 +  private :sync_message
 +
 +  def num_results_for query={}
 +    xapian_query = build_xapian_query query
 +    matchset = run_query xapian_query, 0, 0, 100
 +    matchset.matches_estimated
 +  end
 +
 +  EACH_ID_PAGE = 100
 +  def each_id query={}
 +    offset = 0
 +    page = EACH_ID_PAGE
 +
 +    xapian_query = build_xapian_query query
 +    while true
 +      ids = run_query_ids xapian_query, offset, (offset+page)
 +      ids.each { |id| yield id }
 +      break if ids.size < page
 +      offset += page
 +    end
 +  end
 +
 +  def each_id_by_date query={}
 +    each_id(query) { |id| yield id, lambda { build_message id } }
 +  end
 +
 +  def each_message_in_thread_for m, opts={}
 +    # TODO thread by subject
 +    # TODO handle killed threads
 +    return unless doc = find_doc(m.id)
 +    queue = doc.value(THREAD_VALUENO).split(',')
 +    msgids = [m.id]
 +    seen_threads = Set.new
 +    seen_messages = Set.new [m.id]
 +    while not queue.empty?
 +      thread_id = queue.pop
 +      next if seen_threads.member? thread_id
 +      return false if thread_killed? thread_id
 +      seen_threads << thread_id
 +      docs = term_docids(mkterm(:thread, thread_id)).map { |x| @xapian.document x }
 +      docs.each do |doc|
 +        msgid = doc.value MSGID_VALUENO
 +        next if seen_messages.member? msgid
 +        msgids << msgid
 +        seen_messages << msgid
 +        queue.concat doc.value(THREAD_VALUENO).split(',')
 +      end
 +    end
 +    msgids.each { |id| yield id, lambda { build_message id } }
 +    true
 +  end
 +
 +  def load_contacts emails, opts={}
 +    contacts = Set.new
 +    num = opts[:num] || 20
 +    each_id_by_date :participants => emails do |id,b|
 +      break if contacts.size >= num
 +      m = b.call
 +      ([m.from]+m.to+m.cc+m.bcc).compact.each { |p| contacts << [p.name, p.email] }
 +    end
 +    contacts.to_a.compact.map { |n,e| Person.new n, e }[0...num]
 +  end
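For completeness, a sketch of how buffer code might use the contact loader above; the argument list is this method's actual signature, the caller is invented:

  # Hypothetical caller: the 20 people most recently corresponded with.
  people = index.load_contacts AccountManager.user_emails, :num => 20
  people.each { |p| puts "#{p.name} <#{p.email}>" }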
 +
 +  # TODO share code with the Ferret index
 +  def parse_query s
 +    query = {}
 +
++    subs = HookManager.run("custom-search", :subs => s) || s
 +    subs = subs.gsub(/\b(to|from):(\S+)\b/) do
 +      field, name = $1, $2
 +      if(p = ContactManager.contact_for(name))
 +        [field, p.email]
 +      elsif name == "me"
 +        [field, "(" + AccountManager.user_emails.join("||") + ")"]
 +      else
 +        [field, name]
 +      end.join(":")
 +    end
 +
 +    ## if we see a label:deleted or a label:spam term anywhere in the query
 +    ## string, we set the extra load_spam or load_deleted options to true.
 +    ## bizarre? well, because the query allows arbitrary parenthesized boolean
 +    ## expressions, without fully parsing the query, we can't tell whether
 +    ## the user is explicitly directing us to search spam messages or not.
 +    ## e.g. if the string is -(-(-(-(-label:spam)))), does the user want to
 +    ## search spam messages or not?
 +    ##
 +    ## so, we rely on the fact that turning these extra options ON turns OFF
 +    ## the adding of "-label:deleted" or "-label:spam" terms at the very
 +    ## final stage of query processing. if the user wants to search spam
 +    ## messages, not adding that is the right thing; if he doesn't want to
 +    ## search spam messages, then not adding it won't have any effect.
 +    query[:load_spam] = true if subs =~ /\blabel:spam\b/
 +    query[:load_deleted] = true if subs =~ /\blabel:deleted\b/
 +
 +    ## gmail style "is" operator
 +    subs = subs.gsub(/\b(is|has):(\S+)\b/) do
 +      field, label = $1, $2
 +      case label
 +      when "read"
 +        "-label:unread"
 +      when "spam"
 +        query[:load_spam] = true
 +        "label:spam"
 +      when "deleted"
 +        query[:load_deleted] = true
 +        "label:deleted"
 +      else
 +        "label:#{$2}"
 +      end
 +    end
 +
 +    ## gmail style attachments "filename" and "filetype" searches
 +    subs = subs.gsub(/\b(filename|filetype):(\((.+?)\)\B|(\S+)\b)/) do
 +      field, name = $1, ($3 || $4)
 +      case field
 +      when "filename"
 +        debug "filename: translated #{field}:#{name} to attachment:\"#{name.downcase}\""
 +        "attachment:\"#{name.downcase}\""
 +      when "filetype"
 +        debug "filetype: translated #{field}:#{name} to attachment_extension:#{name.downcase}"
 +        "attachment_extension:#{name.downcase}"
 +      end
 +    end
 +
 +    if $have_chronic
 +      lastdate = 2<<32 - 1
 +      firstdate = 0
 +      subs = subs.gsub(/\b(before|on|in|during|after):(\((.+?)\)\B|(\S+)\b)/) do
 +        field, datestr = $1, ($3 || $4)
 +        realdate = Chronic.parse datestr, :guess => false, :context => :past
 +        if realdate
 +          case field
 +          when "after"
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate.end}"
 +            "date:#{realdate.end.to_i}..#{lastdate}"
 +          when "before"
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate.begin}"
 +            "date:#{firstdate}..#{realdate.end.to_i}"
 +          else
 +            debug "chronic: translated #{field}:#{datestr} to #{realdate}"
 +            "date:#{realdate.begin.to_i}..#{realdate.end.to_i}"
 +          end
 +        else
 +          raise ParseError, "can't understand date #{datestr.inspect}"
 +        end
 +      end
 +    end
 +
 +    ## limit:42 restricts the search to 42 results
 +    subs = subs.gsub(/\blimit:(\S+)\b/) do
 +      lim = $1
 +      if lim =~ /^\d+$/
 +        query[:limit] = lim.to_i
 +        ''
 +      else
 +        raise ParseError, "non-numeric limit #{lim.inspect}"
 +      end
 +    end
 +
 +    qp = Xapian::QueryParser.new
 +    qp.database = @xapian
 +    qp.stemmer = Xapian::Stem.new(STEM_LANGUAGE)
 +    qp.stemming_strategy = Xapian::QueryParser::STEM_SOME
 +    qp.default_op = Xapian::Query::OP_AND
 +    qp.add_valuerangeprocessor(Xapian::NumberValueRangeProcessor.new(DATE_VALUENO, 'date:', true))
 +    NORMAL_PREFIX.each { |k,v| qp.add_prefix k, v }
 +    BOOLEAN_PREFIX.each { |k,v| qp.add_boolean_prefix k, v }
 +    xapian_query = qp.parse_query(subs, Xapian::QueryParser::FLAG_PHRASE|Xapian::QueryParser::FLAG_BOOLEAN|Xapian::QueryParser::FLAG_LOVEHATE|Xapian::QueryParser::FLAG_WILDCARD, PREFIX['body'])
 +
 +    raise ParseError if xapian_query.nil? or xapian_query.empty?
 +    query[:qobj] = xapian_query
 +    query[:text] = s
 +    query
 +  end
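The Xapian flavor of the parser differs from the Ferret one in the concrete terms it emits; a traced example (the timestamp is invented):

  # Hypothetical transcript for the Xapian back end:
  #   input:          "filename:budget.xls after:yesterday"
  #   filename pass:  attachment:"budget.xls"
  #   chronic pass:   date:1252368000..4294967296
  # The date:N..M form is handled by the NumberValueRangeProcessor
  # registered above, which compares against the message's DATE_VALUENO
  # value rather than a posting-list term.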
 +
 +  private
 +
 +  # Stemmed
 +  NORMAL_PREFIX = {
 +    'subject' => 'S',
 +    'body' => 'B',
 +    'from_name' => 'FN',
 +    'to_name' => 'TN',
 +    'name' => 'N',
 +    'attachment' => 'A',
 +  }
 +
 +  # Unstemmed
 +  BOOLEAN_PREFIX = {
 +    'type' => 'K',
 +    'from_email' => 'FE',
 +    'to_email' => 'TE',
 +    'email' => 'E',
 +    'date' => 'D',
 +    'label' => 'L',
 +    'source_id' => 'I',
 +    'attachment_extension' => 'O',
 +    'msgid' => 'Q',
 +    'thread' => 'H',
 +    'ref' => 'R',
 +  }
 +
 +  PREFIX = NORMAL_PREFIX.merge BOOLEAN_PREFIX
 +
 +  MSGID_VALUENO = 0
 +  THREAD_VALUENO = 1
 +  DATE_VALUENO = 2
 +
 +  MAX_TERM_LENGTH = 245
 +
 +  # Xapian can very efficiently sort in ascending docid order. Sup always wants
 +  # to sort by descending date, so this method maps between them. In order to
 +  # handle multiple messages per second, we use a logistic curve centered
 +  # around MIDDLE_DATE so that the slope (docid/s) is greatest in this time
 +  # period. A docid collision is not an error - the code will pick the next
 +  # smallest unused one.
 +  DOCID_SCALE = 2.0**32
 +  TIME_SCALE = 2.0**27
 +  MIDDLE_DATE = Time.gm(2011)
 +  def assign_docid m, truncated_date
 +    t = (truncated_date.to_i - MIDDLE_DATE.to_i).to_f
 +    docid = (DOCID_SCALE - DOCID_SCALE/(Math::E**(-(t/TIME_SCALE)) + 1)).to_i
 +    while docid > 0 and docid_exists? docid
 +      docid -= 1
 +    end
 +    docid > 0 ? docid : nil
 +  end
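The arithmetic above is what lets Xapian's ascending-docid sort stand in for descending-date sort; a spot check, assuming the constants as defined:

  # Behavior of assign_docid's logistic mapping:
  #   t << 0 (very old message):  e**(-t/TIME_SCALE) is huge, so
  #       docid -> DOCID_SCALE (2**32), i.e. old mail gets high docids
  #   t >> 0 (far future):        e**(-t/TIME_SCALE) -> 0, so docid -> 0
  # Hence ascending docid order is descending date order, and the curve
  # is steepest near MIDDLE_DATE (2011), where docids/second is highest.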
 +
 +  # XXX is there a better way?
 +  def docid_exists? docid
 +    begin
 +      @xapian.doclength docid
 +      true
 +    rescue RuntimeError #Xapian::DocNotFoundError
 +      raise unless $!.message =~ /DocNotFoundError/
 +      false
 +    end
 +  end
 +
 +  def term_docids term
 +    @xapian.postlist(term).map { |x| x.docid }
 +  end
 +
 +  def find_docid id
 +    docids = term_docids(mkterm(:msgid,id))
 +    fail unless docids.size <= 1
 +    docids.first
 +  end
 +
 +  def find_doc id
 +    return unless docid = find_docid(id)
 +    @xapian.document docid
 +  end
 +
 +  def get_id docid
 +    return unless doc = @xapian.document(docid)
 +    doc.value MSGID_VALUENO
 +  end
 +
 +  def get_entry id
 +    return unless doc = find_doc(id)
 +    Marshal.load doc.data
 +  end
 +
 +  def thread_killed? thread_id
 +    not run_query(Q.new(Q::OP_AND, mkterm(:thread, thread_id), mkterm(:label, :Killed)), 0, 1).empty?
 +  end
 +
 +  def synchronize &b
 +    @index_mutex.synchronize &b
 +  end
 +
 +  def run_query xapian_query, offset, limit, checkatleast=0
 +    synchronize do
 +      @enquire.query = xapian_query
 +      @enquire.mset(offset, limit-offset, checkatleast)
 +    end
 +  end
 +
 +  def run_query_ids xapian_query, offset, limit
 +    matchset = run_query xapian_query, offset, limit
 +    matchset.matches.map { |r| r.document.value MSGID_VALUENO }
 +  end
 +
 +  Q = Xapian::Query
 +  def build_xapian_query opts
 +    labels = ([opts[:label]] + (opts[:labels] || [])).compact
 +    neglabels = [:spam, :deleted, :killed].reject { |l| (labels.include? l) || opts.member?("load_#{l}".intern) }
 +    pos_terms, neg_terms = [], []
 +
 +    pos_terms << mkterm(:type, 'mail')
 +    pos_terms.concat(labels.map { |l| mkterm(:label,l) })
 +    pos_terms << opts[:qobj] if opts[:qobj]
 +    pos_terms << mkterm(:source_id, opts[:source_id]) if opts[:source_id]
 +
 +    if opts[:participants]
 +      participant_terms = opts[:participants].map { |p| mkterm(:email,:any, (Redwood::Person === p) ? p.email : p) }
 +      pos_terms << Q.new(Q::OP_OR, participant_terms)
 +    end
 +
 +    neg_terms.concat(neglabels.map { |l| mkterm(:label,l) })
 +
 +    pos_query = Q.new(Q::OP_AND, pos_terms)
 +    neg_query = Q.new(Q::OP_OR, neg_terms)
 +
 +    if neg_query.empty?
 +      pos_query
 +    else
 +      Q.new(Q::OP_AND_NOT, [pos_query, neg_query])
 +    end
 +  end
 +
 +  def index_message m, entry, opts
 +    terms = []
 +    text = []
 +
 +    subject_text = m.indexable_subject
 +    body_text = m.indexable_body
 +
 +    # Person names are indexed with several prefixes
 +    person_termer = lambda do |d|
 +      lambda do |p|
 +        ["#{d}_name", "name", "body"].each do |x|
 +          text << [p.name, PREFIX[x]]
 +        end if p.name
 +        [d, :any].each { |x| terms << mkterm(:email, x, p.email) }
 +      end
 +    end
 +
 +    person_termer[:from][m.from] if m.from
 +    (m.to+m.cc+m.bcc).each(&(person_termer[:to]))
 +
 +    terms << mkterm(:date,m.date) if m.date
 +    m.labels.each { |t| terms << mkterm(:label,t) }
 +    terms << mkterm(:type, 'mail')
 +    terms << mkterm(:msgid, m.id)
 +    terms << mkterm(:source_id, m.source.id)
 +    m.attachments.each do |a|
 +      a =~ /\.(\w+)$/ or next
 +      t = mkterm(:attachment_extension, $1)
 +      terms << t
 +    end
 +
 +    ## Thread membership
 +    children = term_docids(mkterm(:ref, m.id)).map { |docid| @xapian.document docid }
 +    parent_ids = m.refs + m.replytos
 +    parents = parent_ids.map { |id| find_doc id }.compact
 +    thread_members = SavingHash.new { [] }
 +    (children + parents).each do |doc2|
 +      thread_ids = doc2.value(THREAD_VALUENO).split ','
 +      thread_ids.each { |thread_id| thread_members[thread_id] << doc2 }
 +    end
 +
 +    thread_ids = thread_members.empty? ? [m.id] : thread_members.keys
 +
 +    thread_ids.each { |thread_id| terms << mkterm(:thread, thread_id) }
 +    parent_ids.each do |ref|
 +      terms << mkterm(:ref, ref)
 +    end
 +
 +    # Full text search content
 +    text << [subject_text, PREFIX['subject']]
 +    text << [subject_text, PREFIX['body']]
 +    text << [body_text, PREFIX['body']]
 +    m.attachments.each { |a| text << [a, PREFIX['attachment']] }
 +
 +    truncated_date = if m.date < MIN_DATE
 +      debug "warning: adjusting too-low date #{m.date} for indexing"
 +      MIN_DATE
 +    elsif m.date > MAX_DATE
 +      debug "warning: adjusting too-high date #{m.date} for indexing"
 +      MAX_DATE
 +    else
 +      m.date
 +    end
 +
 +    # Date value for range queries
 +    date_value = begin
 +      Xapian.sortable_serialise truncated_date.to_i
 +    rescue TypeError
 +      Xapian.sortable_serialise 0
 +    end
 +
 +    docid = nil
 +    unless doc = find_doc(m.id)
 +      doc = Xapian::Document.new
 +      if not docid = assign_docid(m, truncated_date)
 +        # Could be triggered by spam
 +        Redwood::log "warning: docid underflow, dropping #{m.id.inspect}"
 +        return
 +      end
 +    else
 +      doc.clear_terms
 +      doc.clear_values
 +      docid = doc.docid
 +    end
 +
 +    @term_generator.document = doc
 +    text.each { |text,prefix| @term_generator.index_text text, 1, prefix }
 +    terms.each { |term| doc.add_term term if term.length <= MAX_TERM_LENGTH }
 +    doc.add_value MSGID_VALUENO, m.id
 +    doc.add_value THREAD_VALUENO, (thread_ids * ',')
 +    doc.add_value DATE_VALUENO, date_value
 +    doc.data = Marshal.dump entry
 +
 +    @xapian.replace_document docid, doc
 +  end
 +
 +  # Construct a Xapian term
 +  def mkterm type, *args
 +    case type
 +    when :label
 +      PREFIX['label'] + args[0].to_s.downcase
 +    when :type
 +      PREFIX['type'] + args[0].to_s.downcase
 +    when :date
 +      PREFIX['date'] + args[0].getutc.strftime("%Y%m%d%H%M%S")
 +    when :email
 +      case args[0]
 +      when :from then PREFIX['from_email']
 +      when :to then PREFIX['to_email']
 +      when :any then PREFIX['email']
 +      else raise "Invalid email term type #{args[0]}"
 +      end + args[1].to_s.downcase
 +    when :source_id
 +      PREFIX['source_id'] + args[0].to_s.downcase
 +    when :attachment_extension
 +      PREFIX['attachment_extension'] + args[0].to_s.downcase
 +    when :msgid, :ref, :thread
 +      PREFIX[type.to_s] + args[0][0...(MAX_TERM_LENGTH-1)]
 +    else
 +      raise "Invalid term type #{type}"
 +    end
 +  end
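Concretely, the prefix tables plus mkterm produce terms like these (the address is invented):

  # Hypothetical terms generated by mkterm:
  mkterm(:label, :inbox)                      # => "Linbox"
  mkterm(:type, 'mail')                       # => "Kmail"
  mkterm(:email, :from, "Alice@example.com")  # => "FEalice@example.com"
  mkterm(:date, Time.gm(2009, 9, 8))          # => "D20090908000000"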
 +
 +end
 +
 +end
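Putting the Xapian pieces together, a search flows from string to results roughly like this; a sketch under the assumption that `index' is a loaded XapianIndex:

  # Hypothetical end-to-end flow of a search:
  query = index.parse_query "is:read from:me"   # the custom-search hook runs first
  puts "about #{index.num_results_for(query)} matches"
  index.each_id_by_date(query) do |mid, builder|
    puts builder.call.subj
  end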