diff --git a/pokedex/lookup.py b/pokedex/lookup.py
index b31dce8..8488f21 100644
--- a/pokedex/lookup.py
+++ b/pokedex/lookup.py
@@ -1,7 +1,5 @@
 # encoding: utf8
-from collections import namedtuple
 import os, os.path
-import pkg_resources
 import random
 import re
 import shutil
@@ -16,440 +14,585 @@ from whoosh.qparser import QueryParser
 import whoosh.scoring
 import whoosh.spelling
 
+from pokedex.util import namedtuple
+
 from pokedex.db import connect
 import pokedex.db.tables as tables
 from pokedex.roomaji import romanize
+from pokedex.defaults import get_default_index_dir
 
-__all__ = ['open_index', 'lookup', 'random_lookup']
-
-INTERMEDIATE_LOOKUP_RESULTS = 25
-MAX_LOOKUP_RESULTS = 10
-
-# Dictionary of table name => table class.
-# Need the table name so we can get the class from the table name after we
-# retrieve something from the index
-indexed_tables = {}
-for cls in [
-    tables.Ability,
-    tables.Item,
-    tables.Move,
-    tables.Pokemon,
-    tables.Type,
-    ]:
-    indexed_tables[cls.__tablename__] = cls
-
-def normalize(name):
-    """Strips irrelevant formatting junk from name input.
-
-    Specifically: everything is lowercased, and accents are removed.
-    """
-    # http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
-    # Makes sense to me. Decompose by Unicode rules, then remove combining
-    # characters, then recombine. I'm explicitly doing it this way instead of
-    # testing combining() because Korean characters apparently decompose! But
-    # the results are considered letters, not combining characters, so testing
-    # for Mn works well, and combining them again makes them look right.
-    nkfd_form = unicodedata.normalize('NFKD', unicode(name))
-    name = u"".join(c for c in nkfd_form
-                    if unicodedata.category(c) != 'Mn')
-    name = unicodedata.normalize('NFC', name)
-
-    name = name.strip()
-    name = name.lower()
-
-    return name
-
-
-def open_index(directory=None, session=None, recreate=False):
-    """Opens the whoosh index stored in the named directory and returns (index,
-    speller). If the index doesn't already exist, it will be created.
-
-    `directory`
-        Directory containing the index. Defaults to a location within the
-        `pokedex` egg directory.
-
-    `session`
-        If the index needs to be created, this database session will be used.
-        Defaults to an attempt to connect to the default SQLite database
-        installed by `pokedex setup`.
-
-    `recreate`
-        If set to True, the whoosh index will be created even if it already
-        exists.
-    """
-
-    # Defaults
-    if not directory:
-        directory = pkg_resources.resource_filename('pokedex',
-                                                     'data/whoosh-index')
-
-    if not session:
-        session = connect()
-
-    # Attempt to open or create the index
-    directory_exists = os.path.exists(directory)
-    if directory_exists and not recreate:
-        # Already exists; should be an index!
-        try:
-            index = whoosh.index.open_dir(directory, indexname='MAIN')
-            spell_store = whoosh.filedb.filestore.FileStorage(directory)
-            speller = whoosh.spelling.SpellChecker(spell_store)
-            return index, speller
-        except whoosh.index.EmptyIndexError as e:
-            # Apparently not a real index. Fall out of the if and create it
-            pass
-
-    # Delete and start over if we're going to bail anyway.
- if directory_exists and recreate: - # Be safe and only delete if it looks like a whoosh index, i.e., - # everything starts with _ - if all(f[0] == '_' for f in os.listdir(directory)): - shutil.rmtree(directory) - directory_exists = False - - if not directory_exists: - os.mkdir(directory) - - - ### Create index - schema = whoosh.fields.Schema( - name=whoosh.fields.ID(stored=True), - table=whoosh.fields.ID(stored=True), - row_id=whoosh.fields.ID(stored=True), - language=whoosh.fields.STORED, - iso3166=whoosh.fields.STORED, - display_name=whoosh.fields.STORED, # non-lowercased name - forme_name=whoosh.fields.ID, - ) +__all__ = ['PokedexLookup'] - index = whoosh.index.create_in(directory, schema=schema, indexname='MAIN') - writer = index.writer() - - # Index every name in all our tables of interest - # speller_entries becomes a list of (word, score) tuples; the score is 2 - # for English names, 1.5 for Roomaji, and 1 for everything else. I think - # this biases the results in the direction most people expect, especially - # when e.g. German names are very similar to English names - speller_entries = [] - for cls in indexed_tables.values(): - q = session.query(cls) - - for row in q.yield_per(5): - # XXX need to give forme_name a dummy value because I can't search - # for explicitly empty fields. boo. - row_key = dict(table=unicode(cls.__tablename__), - row_id=unicode(row.id), - forme_name=u'XXX') - - def add(name, language, iso3166, score): - normalized_name = normalize(name) - writer.add_document(name=normalized_name, display_name=name, - language=language, - iso3166=iso3166, - **row_key) - speller_entries.append((normalized_name, score)) - - # If this is a form, mark it as such - if getattr(row, 'forme_base_pokemon_id', None): - row_key['forme_name'] = row.forme_name - - name = row.name - add(name, None, u'us', 1) - - # Pokemon also get other languages - for foreign_name in getattr(row, 'foreign_names', []): - moonspeak = foreign_name.name - if name == moonspeak: - # Don't add the English name again as a different language; - # no point and it makes spell results confusing - continue - add(moonspeak, foreign_name.language.name, - foreign_name.language.iso3166, - 3) +rx_is_number = re.compile('^\d+$') - # Add Roomaji too - if foreign_name.language.name == 'Japanese': - roomaji = romanize(foreign_name.name) - add(roomaji, u'Roomaji', u'jp', 8) +LookupResult = namedtuple('LookupResult', [ + 'object', 'indexed_name', 'name', 'language', 'iso639', 'iso3166', 'exact', +]) - writer.commit() +class UninitializedIndex(object): + class UninitializedIndexError(Exception): + pass - # Construct and populate a spell-checker index. Quicker to do it all - # at once, as every call to add_* does a commit(), and those seem to be - # expensive - speller = whoosh.spelling.SpellChecker(index.storage) - speller.add_scored_words(speller_entries) + def __nonzero__(self): + """Dummy object should identify itself as False.""" + return False - return index, speller + def __bool__(self): + """Python 3000 version of the above. Future-proofing rules!""" + return False + def __getattr__(self, *args, **kwargs): + raise self.UninitializedIndexError( + "The lookup index does not exist. Please use `pokedex setup` " + "or lookup.rebuild_index() to create it." + ) class LanguageWeighting(whoosh.scoring.Weighting): """A scoring class that forces otherwise-equal English results to come before foreign results. 
""" + def __init__(self, extra_weights={}, *args, **kwargs): + """`extra_weights` may be a dictionary of weights which will be + factored in. + + Intended for use with spelling corrections, which come along with their + own weightings. + """ + self.extra_weights = extra_weights + super(LanguageWeighting, self).__init__(*args, **kwargs) + def score(self, searcher, fieldnum, text, docnum, weight, QTF=1): doc = searcher.stored_fields(docnum) - if doc['language'] == None: + + # Apply extra weight + weight = weight * self.extra_weights.get(text, 1.0) + + language = doc.get('language') + if language is None: # English (well, "default"); leave it at 1 return weight - elif doc['language'] == u'Roomaji': - # Give Roomaji a bit of a boost, as it's most likely to be searched - return weight * 0.95 + elif language == u'Roomaji': + # Give Roomaji a little boost; it's most likely to be searched + return weight * 0.9 else: # Everything else can drop down the totem pole - return weight * 0.9 + return weight * 0.8 + + +class PokedexLookup(object): + MAX_FUZZY_RESULTS = 10 + MAX_EXACT_RESULTS = 43 + INTERMEDIATE_FACTOR = 2 + + # The speller only checks how much the input matches a word; there can be + # all manner of extra unmatched junk, and it won't affect the weighting. + # To compensate, greatly boost the weighting of matches at the beginning + # and end, so nearly-full-word-matches are much better + SPELLER_OPTIONS = dict(booststart=10.0, boostend=9.0) + + # Dictionary of table name => table class. + # Need the table name so we can get the class from the table name after we + # retrieve something from the index + indexed_tables = dict( + (cls.__tablename__, cls) + for cls in ( + tables.Ability, + tables.Item, + tables.Location, + tables.Move, + tables.Nature, + tables.Pokemon, + tables.PokemonForm, + tables.Type, + ) + ) -rx_is_number = re.compile('^\d+$') -LookupResult = namedtuple('LookupResult', - ['object', 'name', 'language', 'iso3166', 'exact']) + def __init__(self, directory=None, session=None): + """Opens the whoosh index stored in the named directory. If the index + doesn't already exist, it will be created. -def _parse_table_name(name): - """Takes a singular table name, table name, or table object and returns the - table name. + `directory` + Directory containing the index. Defaults to a location within the + `pokedex` egg directory. - Returns None for a bogus name. - """ - if hasattr(name, '__tablename__'): - return getattr(name, '__tablename__') - elif name in indexed_tables: - return name - elif name + 's' in indexed_tables: - return name + 's' - else: - # Bogus. Be nice and return dummy - return None + `session` + Used for creating the index and retrieving objects. Defaults to an + attempt to connect to the default SQLite database installed by + `pokedex setup`. + """ -def _whoosh_records_to_results(records, session, exact=True): - """Converts a list of whoosh's indexed records to LookupResult tuples - containing database objects. - """ - # XXX this 'exact' thing is getting kinda leaky. 
would like a better way - # to handle it, since only lookup() cares about fuzzy results - seen = {} - results = [] - for record in records: - # Skip dupes - seen_key = record['table'], record['row_id'] - if seen_key in seen: - continue - seen[seen_key] = True - - cls = indexed_tables[record['table']] - obj = session.query(cls).get(record['row_id']) - - results.append(LookupResult(object=obj, - name=record['display_name'], - language=record['language'], - iso3166=record['iso3166'], - exact=exact)) - - return results - - -def lookup(input, valid_types=[], session=None, indices=None, exact_only=False): - """Attempts to find some sort of object, given a database session and name. - - Returns a list of named (object, name, language, iso3166, exact) tuples. - `object` is a database object, `name` is the name under which the object - was found, `language` and `iso3166` are the name and country code of the - language in which the name was found, and `exact` is True iff this was an - exact match. - - This function currently ONLY does fuzzy matching if there are no exact - matches. - - Formes are not returned unless requested; "Shaymin" will return only grass - Shaymin. - - Extraneous whitespace is removed with extreme prejudice. - - Recognizes: - - Names: "Eevee", "Surf", "Run Away", "Payapa Berry", etc. - - Foreign names: "Iibui", "Eivui" - - Fuzzy names in whatever language: "Evee", "Ibui" - - IDs: "133", "192", "250" - Also: - - Type restrictions. "type:psychic" will only return the type. This is - how to make ID lookup useful. Multiple type specs can be entered with - commas, as "move,item:1". If `valid_types` are provided, any type prefix - will be ignored. - - Alternate formes can be specified merely like "wash rotom". - - `input` - Name of the thing to look for. - - `valid_types` - A list of table objects or names, e.g., `['pokemon', 'moves']`. If - this is provided, only results in one of the given tables will be - returned. - - `session` - A database session to use for retrieving objects. As with get_index, - if this is not provided, a connection to the default database will be - attempted. - - `indices` - Tuple of index, speller as returned from `open_index()`. Defaults to - a call to `open_index()`. - - `exact_only` - If True, only exact matches are returned. If set to False (the - default), and the provided `name` doesn't match anything exactly, - spelling correction will be attempted. - """ + # By the time this returns, self.index, self.speller, and self.session + # must be set + + # If a directory was not given, use the default + if directory is None: + directory = get_default_index_dir() - if not session: - session = connect() + self.directory = directory - if indices: - index, speller = indices - else: - index, speller = open_index() + if session: + self.session = session + else: + self.session = connect() + + # Attempt to open or create the index + if not os.path.exists(directory) or not os.listdir(directory): + # Directory doesn't exist OR is empty; caller needs to use + # rebuild_index before doing anything. Provide a dummy object that + # complains when used + self.index = UninitializedIndex() + self.speller = UninitializedIndex() + return + + # Otherwise, already exists; should be an index! Bam, done. 
+ # Note that this will explode if the directory exists but doesn't + # contain an index; that's a feature + try: + self.index = whoosh.index.open_dir(directory, indexname='MAIN') + except whoosh.index.EmptyIndexError: + raise IOError( + "The index directory already contains files. " + "Please use a dedicated directory for the lookup index." + ) + + # Create speller, and done + spell_store = whoosh.filedb.filestore.FileStorage(directory) + self.speller = whoosh.spelling.SpellChecker(spell_store, + **self.SPELLER_OPTIONS) + + + def rebuild_index(self): + """Creates the index from scratch.""" + + schema = whoosh.fields.Schema( + name=whoosh.fields.ID(stored=True), + table=whoosh.fields.ID(stored=True), + row_id=whoosh.fields.ID(stored=True), + language=whoosh.fields.STORED, + iso639=whoosh.fields.ID(stored=True), + iso3166=whoosh.fields.ID(stored=True), + display_name=whoosh.fields.STORED, # non-lowercased name + ) + + if os.path.exists(self.directory): + # create_in() isn't totally reliable, so just nuke whatever's there + # manually. Try to be careful about this... + for f in os.listdir(self.directory): + if re.match('^_?(MAIN|SPELL)_', f): + os.remove(os.path.join(self.directory, f)) + else: + os.mkdir(self.directory) + + self.index = whoosh.index.create_in(self.directory, schema=schema, + indexname='MAIN') + writer = self.index.writer() + + # Index every name in all our tables of interest + speller_entries = set() + for cls in self.indexed_tables.values(): + q = self.session.query(cls) + + for row in q.yield_per(5): + row_key = dict(table=unicode(cls.__tablename__), + row_id=unicode(row.id)) + + def add(name, language, iso639, iso3166): + normalized_name = self.normalize_name(name) + + writer.add_document( + name=normalized_name, display_name=name, + language=language, iso639=iso639, iso3166=iso3166, + **row_key + ) + + speller_entries.add(normalized_name) + + + # Add the basic English name to the index + if cls == tables.Pokemon: + # Don't re-add alternate forms of the same Pokémon; they'll + # be added as Pokémon forms instead + if not row.is_base_form: + continue + elif cls == tables.PokemonForm: + if row.name: + add(row.pokemon_name, None, u'en', u'us') + continue - name = normalize(input) - exact = True - form = None + # Some things also have other languages' names + # XXX other language form names..? + seen = set() + for language, name in getattr(row, 'name_map', {}).items(): + if name in seen: + # Don't add the name again as a different + # language; no point and it makes spell results + # confusing + continue + seen.add(name) + + add(name, language.name, + language.iso639, + language.iso3166) + + # Add Roomaji too + if language.identifier == 'ja': + roomaji = romanize(name) + add(roomaji, u'Roomaji', u'ja', u'jp') + + writer.commit() + + # Construct and populate a spell-checker index. Quicker to do it all + # at once, as every call to add_* does a commit(), and those seem to be + # expensive + self.speller = whoosh.spelling.SpellChecker(self.index.storage, mingram=2, + **self.SPELLER_OPTIONS) + self.speller.add_words(speller_entries) + + + def normalize_name(self, name): + """Strips irrelevant formatting junk from name input. + + Specifically: everything is lowercased, and accents are removed. + """ + # http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string + # Makes sense to me. Decompose by Unicode rules, then remove combining + # characters, then recombine. 
I'm explicitly doing it this way instead + # of testing combining() because Korean characters apparently + # decompose! But the results are considered letters, not combining + # characters, so testing for Mn works well, and combining them again + # makes them look right. + nkfd_form = unicodedata.normalize('NFKD', unicode(name)) + name = u"".join(c for c in nkfd_form + if unicodedata.category(c) != 'Mn') + name = unicodedata.normalize('NFC', name) - # Remove any type prefix (pokemon:133) before constructing a query - if ':' in name: - prefix_chunk, name = name.split(':', 1) name = name.strip() + name = name.lower() - if not valid_types: - # Only use types from the query string if none were explicitly - # provided - prefixes = prefix_chunk.split(',') - valid_types = [_.strip() for _ in prefixes] - - # Random lookup - if name == 'random': - return random_lookup(indices=(index, speller), - session=session, - valid_types=valid_types) - - # Do different things depending what the query looks like - # Note: Term objects do an exact match, so we don't have to worry about a - # query parser tripping on weird characters in the input - if '*' in name or '?' in name: - exact_only = True - query = whoosh.query.Wildcard(u'name', name) - elif rx_is_number.match(name): - # Don't spell-check numbers! - exact_only = True - query = whoosh.query.Term(u'row_id', name) - else: - # Not an integer - query = whoosh.query.Term(u'name', name) \ - & whoosh.query.Term(u'forme_name', u'XXX') - - # If there's a space in the input, this might be a form - if ' ' in name: - form, formless_name = name.split(' ', 1) - form_query = whoosh.query.Term(u'name', formless_name) \ - & whoosh.query.Term(u'forme_name', form) - query = query | form_query - - ### Filter by type of object - type_terms = [] - for valid_type in valid_types: - table_name = _parse_table_name(valid_type) - if table_name: - # Quietly ignore bogus valid_types; more likely to DTRT - type_terms.append(whoosh.query.Term(u'table', table_name)) + return name - if type_terms: - query = query & whoosh.query.Or(type_terms) + def _apply_valid_types(self, name, valid_types): + """Combines the enforced `valid_types` with any from the search string + itself and updates the query. - ### Actual searching - searcher = index.searcher() - searcher.weighting = LanguageWeighting() # XXX kosher? docs say search() - # takes a weighting kw but it - # certainly does not - results = searcher.search(query, limit=INTERMEDIATE_LOOKUP_RESULTS) + For example, a name of 'a,b:foo' and valid_types of b,c will search for + only `b`s named "foo". - # Look for some fuzzy matches if necessary - if not exact_only and not results: - exact = False - results = [] + Returns `(name, merged_valid_types, term)`, where `name` has had any type + prefix stripped, `merged_valid_types` combines the original + `valid_types` with the type prefix, and `term` is a query term for + limited to just the allowed types. If there are no type restrictions + at all, `term` will be None. 
+ """ - for suggestion in speller.suggest(name, INTERMEDIATE_LOOKUP_RESULTS): - query = whoosh.query.Term('name', suggestion) - results.extend(searcher.search(query)) + # Remove any type prefix (pokemon:133) first + user_valid_types = [] + if ':' in name: + prefix_chunk, name = name.split(':', 1) + name = name.strip() - ### Convert results to db objects - objects = _whoosh_records_to_results(results, session, exact=exact) + prefixes = prefix_chunk.split(',') + user_valid_types = [] + for prefix in prefixes: + prefix = prefix.strip() + if prefix: + user_valid_types.append(prefix) + + # Merge the valid types together. Only types that appear in BOTH lists + # may be used. + # As a special case, if the user asked for types that are explicitly + # forbidden, completely ignore what the user requested. + # And, just to complicate matters: "type" and language need to be + # considered separately. + def merge_requirements(func): + user = filter(func, user_valid_types) + system = filter(func, valid_types) + + if user and system: + merged = list(set(user) & set(system)) + if merged: + return merged + else: + # No overlap; use the system restrictions + return system + else: + # One or the other is blank; use the one that's not + return user or system + + # @foo means language must be foo; otherwise it's a table name + lang_requirements = merge_requirements(lambda req: req[0] == u'@') + type_requirements = merge_requirements(lambda req: req[0] != u'@') + all_requirements = lang_requirements + type_requirements + + # Construct the term + lang_terms = [] + for lang in lang_requirements: + # Allow for either country or language codes + lang_code = lang[1:] + lang_terms.append(whoosh.query.Term(u'iso639', lang_code)) + lang_terms.append(whoosh.query.Term(u'iso3166', lang_code)) + + type_terms = [] + for type in type_requirements: + table_name = self._parse_table_name(type) - # Only return up to 10 matches; beyond that, something is wrong. - # We strip out duplicate entries above, so it's remotely possible that we - # should have more than 10 here and lost a few. The speller returns 25 to - # give us some padding, and should avoid that problem. Not a big deal if - # we lose the 25th-most-likely match anyway. - return objects[:MAX_LOOKUP_RESULTS] + # Quietly ignore bogus valid_types; more likely to DTRT + if table_name: + type_terms.append(whoosh.query.Term(u'table', table_name)) + # Combine both kinds of restriction + all_terms = [] + if type_terms: + all_terms.append(whoosh.query.Or(type_terms)) + if lang_terms: + all_terms.append(whoosh.query.Or(lang_terms)) -def random_lookup(valid_types=[], session=None, indices=None): - """Takes similar arguments as `lookup()`, but returns a random lookup - result from one of the provided `valid_types`. - """ + return name, all_requirements, whoosh.query.And(all_terms) - tables = [] - for valid_type in valid_types: - table_name = _parse_table_name(valid_type) - if table_name: - tables.append(indexed_tables[table_name]) - - if not tables: - # n.b.: It's possible we got a list of valid_types and none of them - # were valid, but this function is guaranteed to return *something*, so - # it politely selects from the entire index isntead - tables = indexed_tables.values() - - # Rather than create an array of many hundred items and pick randomly from - # it, just pick a number up to the total number of potential items, then - # pick randomly from that, and partition the whole range into chunks. 
- # This also avoids the slight problem that the index contains more rows - # (for languages) for some items than others. - # XXX ought to cache this (in the index?) if possible - total = 0 - partitions = [] - for table in tables: - count = session.query(table).count() - total += count - partitions.append((table, count)) - - n = random.randint(1, total) - while n > partitions[0][1]: - n -= partitions[0][1] - partitions.pop(0) - - return lookup(unicode(n), valid_types=[ partitions[0][0] ], - indices=indices, session=session) - -def prefix_lookup(prefix, session=None, indices=None): - """Returns terms starting with the given exact prefix. - - No special magic is currently done with the name; type prefixes are not - recognized. - - `session` and `indices` are treated as with `lookup()`. - """ - if not session: - session = connect() + def _parse_table_name(self, name): + """Takes a singular table name, table name, or table object and returns + the table name. - if indices: - index, speller = indices - else: - index, speller = open_index() + Returns None for a bogus name. + """ + # Table object + if hasattr(name, '__tablename__'): + return getattr(name, '__tablename__') - query = whoosh.query.Prefix(u'name', normalize(prefix)) + # Table name + for table in self.indexed_tables.values(): + if name in (table.__tablename__, table.__singlename__): + return table.__tablename__ - searcher = index.searcher() - searcher.weighting = LanguageWeighting() - results = searcher.search(query) # XXX , limit=MAX_LOOKUP_RESULTS) + # Bogus. Be nice and return dummy + return None - return _whoosh_records_to_results(results, session) + def _whoosh_records_to_results(self, records, exact=True): + """Converts a list of whoosh's indexed records to LookupResult tuples + containing database objects. + """ + # XXX this 'exact' thing is getting kinda leaky. would like a better + # way to handle it, since only lookup() cares about fuzzy results + seen = {} + results = [] + for record in records: + # Skip dupes + seen_key = record['table'], record['row_id'] + if seen_key in seen: + continue + seen[seen_key] = True + + cls = self.indexed_tables[record['table']] + obj = self.session.query(cls).get(record['row_id']) + + results.append(LookupResult(object=obj, + indexed_name=record['name'], + name=record['display_name'], + language=record.get('language'), + iso639=record['iso639'], + iso3166=record['iso3166'], + exact=exact)) + + return results + + + def lookup(self, input, valid_types=[], exact_only=False): + """Attempts to find some sort of object, given a name. + + Returns a list of named (object, name, language, iso639, iso3166, + exact) tuples. `object` is a database object, `name` is the name under + which the object was found, `language` and the two isos are the name + and country codes of the language in which the name was found, and + `exact` is True iff this was an exact match. + + This function currently ONLY does fuzzy matching if there are no exact + matches. + + Formes are not returned unless requested; "Shaymin" will return only + grass Shaymin. + + Extraneous whitespace is removed with extreme prejudice. + + Recognizes: + - Names: "Eevee", "Surf", "Run Away", "Payapa Berry", etc. + - Foreign names: "Iibui", "Eivui" + - Fuzzy names in whatever language: "Evee", "Ibui" + - IDs: "133", "192", "250" + Also: + - Type restrictions. "type:psychic" will only return the type. This + is how to make ID lookup useful. Multiple type specs can be entered + with commas, as "move,item:1". + - Language restrictions. 
"@fr:charge" will only return Tackle, which + is called "Charge" in French. These can be combined with type + restrictions, e.g., "@fr,move:charge". + - Alternate formes can be specified merely like "wash rotom". + + `input` + Name of the thing to look for. + + `valid_types` + A list of type or language restrictions, e.g., `['pokemon', + '@ja']`. If this is provided, only results in one of the given + tables will be returned. + + `exact_only` + If True, only exact matches are returned. If set to False (the + default), and the provided `name` doesn't match anything exactly, + spelling correction will be attempted. + """ + + name = self.normalize_name(input) + exact = True + form = None + + # Pop off any type prefix and merge with valid_types + name, merged_valid_types, type_term = \ + self._apply_valid_types(name, valid_types) + + # Random lookup + if name == 'random': + return self.random_lookup(valid_types=merged_valid_types) + + # Do different things depending what the query looks like + # Note: Term objects do an exact match, so we don't have to worry about + # a query parser tripping on weird characters in the input + try: + # Let Python try to convert to a number, so 0xff works + name_as_number = int(name, base=0) + except ValueError: + # Oh well + name_as_number = None + + if '*' in name or '?' in name: + exact_only = True + query = whoosh.query.Wildcard(u'name', name) + elif name_as_number is not None: + # Don't spell-check numbers! + exact_only = True + query = whoosh.query.Term(u'row_id', unicode(name_as_number)) + else: + # Not an integer + query = whoosh.query.Term(u'name', name) + + if type_term: + query = query & type_term + + + ### Actual searching + # Limits; result limits are constants, and intermediate results (before + # duplicate items are stripped out) are capped at the result limit + # times another constant. + # Fuzzy are capped at 10, beyond which something is probably very + # wrong. Exact matches -- that is, wildcards and ids -- are far less + # constrained. + # Also, exact matches are sorted by name, since weight doesn't matter. 
+ sort_by = dict() + if exact_only: + max_results = self.MAX_EXACT_RESULTS + sort_by['sortedby'] = (u'table', u'name') + else: + max_results = self.MAX_FUZZY_RESULTS + + searcher = self.index.searcher(weighting=LanguageWeighting()) + results = searcher.search( + query, + limit=int(max_results * self.INTERMEDIATE_FACTOR), + **sort_by + ) + + # Look for some fuzzy matches if necessary + if not exact_only and not results: + exact = False + results = [] + + fuzzy_query_parts = [] + fuzzy_weights = {} + min_weight = [None] + for suggestion, _, weight in self.speller.suggestions_and_scores(name): + # Only allow the top 50% of scores; otherwise there will always + # be a lot of trailing junk + if min_weight[0] is None: + min_weight[0] = weight * 0.5 + elif weight < min_weight[0]: + break + + fuzzy_query_parts.append(whoosh.query.Term('name', suggestion)) + fuzzy_weights[suggestion] = weight + + if not fuzzy_query_parts: + # Nothing at all; don't try querying + return [] + + fuzzy_query = whoosh.query.Or(fuzzy_query_parts) + if type_term: + fuzzy_query = fuzzy_query & type_term + + searcher.weighting = LanguageWeighting(extra_weights=fuzzy_weights) + results = searcher.search(fuzzy_query) + + ### Convert results to db objects + objects = self._whoosh_records_to_results(results, exact=exact) + + # Truncate and return + return objects[:max_results] + + + def random_lookup(self, valid_types=[]): + """Returns a random lookup result from one of the provided + `valid_types`. + """ + + table_names = [] + for valid_type in valid_types: + table_name = self._parse_table_name(valid_type) + # Skip anything not recognized. Could be, say, a language code. + # XXX The vast majority of Pokémon forms are unnamed and unindexed, + # which can produce blank results. So skip them too for now. + if table_name and table_name != 'pokemon_forms': + table_names.append(table_name) + + if not table_names: + # n.b.: It's possible we got a list of valid_types and none of them + # were valid, but this function is guaranteed to return + # *something*, so it politely selects from the entire index instead + table_names = self.indexed_tables.keys() + table_names.remove('pokemon_forms') + + # Pick a random table, then pick a random item from it. Small tables + # like Type will have an unnatural bias. The alternative is that a + # simple search for "random" will do some eight queries, counting the + # rows in every single indexed table, and that's awful. + # XXX Can we improve on this, reasonably? + table_name = random.choice(table_names) + count = self.session.query(self.indexed_tables[table_name]).count() + id, = self.session.query(self.indexed_tables[table_name].id) \ + .offset(random.randint(0, count - 1)) \ + .first() + + return self.lookup(unicode(id), valid_types=[table_name]) + + def prefix_lookup(self, prefix, valid_types=[]): + """Returns terms starting with the given exact prefix. + + Type prefixes are recognized, but no other name munging is done. + """ + + # Pop off any type prefix and merge with valid_types + prefix, merged_valid_types, type_term = \ + self._apply_valid_types(prefix, valid_types) + + query = whoosh.query.Prefix(u'name', self.normalize_name(prefix)) + + if type_term: + query = query & type_term + + searcher = self.index.searcher() + searcher.weighting = LanguageWeighting() + results = searcher.search(query) # XXX , limit=self.MAX_LOOKUP_RESULTS) + + return self._whoosh_records_to_results(results)
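
For reference, here is a brief usage sketch of the PokedexLookup API introduced by this diff. It is not part of the patch: the query strings and printed fields are illustrative only, and it assumes the default database and index locations configured by `pokedex setup` (or a fresh index built with rebuild_index()).

# Illustrative only -- not part of the patch.  Assumes the default
# database/index from `pokedex setup`; all query strings are examples.
from pokedex.lookup import PokedexLookup

lookup = PokedexLookup()        # opens the default whoosh index directory

# If the index directory was empty, any use of the index raises
# UninitializedIndexError until the index is (re)built:
#     lookup.rebuild_index()

# Exact lookup by name, across all indexed tables and languages
for result in lookup.lookup(u'Eevee'):
    print result.name, result.language, result.exact

# Restrict results by table, by language, or both -- either through
# valid_types or through a prefix in the query string itself
lookup.lookup(u'1', valid_types=['pokemon'])
lookup.lookup(u'@fr,move:charge')     # French "Charge", i.e. the move Tackle

# Misspellings fall back to the spell-checker; those results come back
# with exact=False and are capped at MAX_FUZZY_RESULTS
lookup.lookup(u'Evee')

# Prefix completion; the same type/language prefixes are recognized
lookup.prefix_lookup(u'pika')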