+ if session:
+ self.session = session
+ else:
+ self.session = connect()
+
+ # Attempt to open or create the index
+ if not os.path.exists(directory) or not os.listdir(directory):
+ # Directory doesn't exist OR is empty; caller needs to use
+ # rebuild_index before doing anything. Provide a dummy object that
+ # complains when used
+ self.index = UninitializedIndex()
+ self.speller = UninitializedIndex()
+ return
+
+ # Otherwise, already exists; should be an index! Bam, done.
+ # Note that this will explode if the directory exists but doesn't
+ # contain an index; that's a feature
+ try:
+ self.index = whoosh.index.open_dir(directory, indexname='MAIN')
+ except whoosh.index.EmptyIndexError:
+ raise IOError(
+ "The index directory already contains files. "
+ "Please use a dedicated directory for the lookup index."
+ )
+
+ # Create speller, and done
+ spell_store = whoosh.filedb.filestore.FileStorage(directory)
+ self.speller = whoosh.spelling.SpellChecker(spell_store,
+ **self.SPELLER_OPTIONS)
+
+
    def rebuild_index(self):
        """Creates the index from scratch.

        Walks every table in `self.indexed_tables`, indexing each row's
        name in every available language (plus a generated roomaji entry
        for Japanese names), then builds the spell-checker dictionary from
        the same set of normalized names.  Replaces `self.index` and
        `self.speller` when done.
        """

        # One whoosh document per (row, language) name.  `name` is the
        # normalized search key; `display_name` keeps the original casing.
        schema = whoosh.fields.Schema(
            name=whoosh.fields.ID(stored=True),
            table=whoosh.fields.ID(stored=True),
            row_id=whoosh.fields.ID(stored=True),
            language=whoosh.fields.STORED,
            iso639=whoosh.fields.ID(stored=True),
            iso3166=whoosh.fields.ID(stored=True),
            display_name=whoosh.fields.STORED, # non-lowercased name
        )

        if os.path.exists(self.directory):
            # create_in() isn't totally reliable, so just nuke whatever's there
            # manually.  Try to be careful about this: only files that look
            # like whoosh MAIN/SPELL index segments are removed; anything
            # else in the directory is left alone.
            for f in os.listdir(self.directory):
                if re.match('^_?(MAIN|SPELL)_', f):
                    os.remove(os.path.join(self.directory, f))
        else:
            os.mkdir(self.directory)

        self.index = whoosh.index.create_in(self.directory, schema=schema,
                                            indexname='MAIN')
        writer = self.index.writer()

        # Index every name in all our tables of interest
        speller_entries = set()
        for cls in self.indexed_tables.values():
            q = self.session.query(cls)

            # yield_per keeps memory bounded while walking large tables
            for row in q.yield_per(5):
                # Identifying columns shared by every document built from
                # this row
                row_key = dict(table=unicode(cls.__tablename__),
                               row_id=unicode(row.id))

                def add(name, language, iso639, iso3166):
                    # Helper: record one name for the current row in both
                    # the main index and the spell-checker word list.
                    # (Closes over `writer` and the current `row_key`; only
                    # called within this same loop iteration.)
                    normalized_name = self.normalize_name(name)

                    writer.add_document(
                        name=normalized_name, display_name=name,
                        language=language, iso639=iso639, iso3166=iso3166,
                        **row_key
                    )

                    speller_entries.add(normalized_name)


                # Add the basic English name to the index
                if cls == tables.Pokemon:
                    # Don't re-add alternate forms of the same Pokémon; they'll
                    # be added as Pokémon forms instead
                    if not row.is_base_form:
                        continue
                elif cls == tables.PokemonForm:
                    if row.name:
                        add(row.pokemon_name, None, u'en', u'us')
                    # Forms get only the combined name above, never the
                    # per-language names below
                    continue

                # Some things also have other languages' names
                # XXX other language form names..?
                seen = set()
                for language, name in getattr(row, 'name_map', {}).items():
                    if name in seen:
                        # Don't add the name again as a different
                        # language; no point and it makes spell results
                        # confusing
                        continue
                    seen.add(name)

                    add(name, language.name,
                        language.iso639,
                        language.iso3166)

                    # Add Roomaji too
                    if language.identifier == 'ja':
                        roomaji = romanize(name)
                        add(roomaji, u'Roomaji', u'ja', u'jp')

        writer.commit()

        # Construct and populate a spell-checker index. Quicker to do it all
        # at once, as every call to add_* does a commit(), and those seem to be
        # expensive
        self.speller = whoosh.spelling.SpellChecker(self.index.storage, mingram=2,
                                                    **self.SPELLER_OPTIONS)
        self.speller.add_words(speller_entries)
+
+
+ def normalize_name(self, name):
+ """Strips irrelevant formatting junk from name input.
+
+ Specifically: everything is lowercased, and accents are removed.
+ """
+ # http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
+ # Makes sense to me. Decompose by Unicode rules, then remove combining
+ # characters, then recombine. I'm explicitly doing it this way instead
+ # of testing combining() because Korean characters apparently
+ # decompose! But the results are considered letters, not combining
+ # characters, so testing for Mn works well, and combining them again
+ # makes them look right.
+ nkfd_form = unicodedata.normalize('NFKD', unicode(name))
+ name = u"".join(c for c in nkfd_form
+ if unicodedata.category(c) != 'Mn')
+ name = unicodedata.normalize('NFC', name)
+
+ name = name.strip()
+ name = name.lower()
+
+ return name
+
+
+ def _apply_valid_types(self, name, valid_types):
+ """Combines the enforced `valid_types` with any from the search string
+ itself and updates the query.
+
+ For example, a name of 'a,b:foo' and valid_types of b,c will search for
+ only `b`s named "foo".
+
+ Returns `(name, merged_valid_types, term)`, where `name` has had any type
+ prefix stripped, `merged_valid_types` combines the original
+ `valid_types` with the type prefix, and `term` is a query term for
+ limited to just the allowed types. If there are no type restrictions
+ at all, `term` will be None.
+ """
+
+ # Remove any type prefix (pokemon:133) first
+ user_valid_types = []
+ if ':' in name:
+ prefix_chunk, name = name.split(':', 1)
+ name = name.strip()
+
+ prefixes = prefix_chunk.split(',')
+ user_valid_types = []
+ for prefix in prefixes:
+ prefix = prefix.strip()
+ if prefix:
+ user_valid_types.append(prefix)
+
+ # Merge the valid types together. Only types that appear in BOTH lists
+ # may be used.
+ # As a special case, if the user asked for types that are explicitly
+ # forbidden, completely ignore what the user requested.
+ # And, just to complicate matters: "type" and language need to be
+ # considered separately.
+ def merge_requirements(func):
+ user = filter(func, user_valid_types)
+ system = filter(func, valid_types)
+
+ if user and system:
+ merged = list(set(user) & set(system))
+ if merged:
+ return merged
+ else:
+ # No overlap; use the system restrictions
+ return system
+ else:
+ # One or the other is blank; use the one that's not
+ return user or system
+
+ # @foo means language must be foo; otherwise it's a table name
+ lang_requirements = merge_requirements(lambda req: req[0] == u'@')
+ type_requirements = merge_requirements(lambda req: req[0] != u'@')
+ all_requirements = lang_requirements + type_requirements
+
+ # Construct the term
+ lang_terms = []
+ for lang in lang_requirements:
+ # Allow for either country or language codes
+ lang_code = lang[1:]
+ lang_terms.append(whoosh.query.Term(u'iso639', lang_code))
+ lang_terms.append(whoosh.query.Term(u'iso3166', lang_code))
+
+ type_terms = []
+ for type in type_requirements:
+ table_name = self._parse_table_name(type)
+
+ # Quietly ignore bogus valid_types; more likely to DTRT
+ if table_name:
+ type_terms.append(whoosh.query.Term(u'table', table_name))
+
+ # Combine both kinds of restriction
+ all_terms = []
+ if type_terms:
+ all_terms.append(whoosh.query.Or(type_terms))
+ if lang_terms:
+ all_terms.append(whoosh.query.Or(lang_terms))
+
+ return name, all_requirements, whoosh.query.And(all_terms)
+
+
+ def _parse_table_name(self, name):
+ """Takes a singular table name, table name, or table object and returns
+ the table name.
+
+ Returns None for a bogus name.
+ """
+ # Table object
+ if hasattr(name, '__tablename__'):
+ return getattr(name, '__tablename__')
+
+ # Table name
+ for table in self.indexed_tables.values():
+ if name in (table.__tablename__, table.__singlename__):
+ return table.__tablename__
+
+ # Bogus. Be nice and return dummy
+ return None