X-Git-Url: http://git.veekun.com/zzz-pokedex.git/blobdiff_plain/5f78886f15b2d250e6f842c8b3842ed1499e28b8..5bbbc254aeeb41c1b3421ffb3188f0833d0a8d7b:/pokedex/lookup.py?ds=inline diff --git a/pokedex/lookup.py b/pokedex/lookup.py index 2565fcc..b31dce8 100644 --- a/pokedex/lookup.py +++ b/pokedex/lookup.py @@ -5,6 +5,7 @@ import pkg_resources import random import re import shutil +import unicodedata from sqlalchemy.sql import func import whoosh @@ -37,6 +38,28 @@ for cls in [ ]: indexed_tables[cls.__tablename__] = cls +def normalize(name): + """Strips irrelevant formatting junk from name input. + + Specifically: everything is lowercased, and accents are removed. + """ + # http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string + # Makes sense to me. Decompose by Unicode rules, then remove combining + # characters, then recombine. I'm explicitly doing it this way instead of + # testing combining() because Korean characters apparently decompose! But + # the results are considered letters, not combining characters, so testing + # for Mn works well, and combining them again makes them look right. + nkfd_form = unicodedata.normalize('NFKD', unicode(name)) + name = u"".join(c for c in nkfd_form + if unicodedata.category(c) != 'Mn') + name = unicodedata.normalize('NFC', name) + + name = name.strip() + name = name.lower() + + return name + + def open_index(directory=None, session=None, recreate=False): """Opens the whoosh index stored in the named directory and returns (index, speller). If the index doesn't already exist, it will be created. 
@@ -119,11 +142,12 @@ def open_index(directory=None, session=None, recreate=False): forme_name=u'XXX') def add(name, language, iso3166, score): - writer.add_document(name=name.lower(), display_name=name, + normalized_name = normalize(name) + writer.add_document(name=normalized_name, display_name=name, language=language, iso3166=iso3166, **row_key) - speller_entries.append((name.lower(), score)) + speller_entries.append((normalized_name, score)) # If this is a form, mark it as such if getattr(row, 'forme_base_pokemon_id', None): @@ -228,10 +252,11 @@ def _whoosh_records_to_results(records, session, exact=True): def lookup(input, valid_types=[], session=None, indices=None, exact_only=False): """Attempts to find some sort of object, given a database session and name. - Returns a list of named (object, name, language, exact) tuples. `object` - is a database object, `name` is the name under which the object was found, - `language` is the name of the language in which the name was found, and - `exact` is True iff this was an exact match. + Returns a list of named (object, name, language, iso3166, exact) tuples. + `object` is a database object, `name` is the name under which the object + was found, `language` and `iso3166` are the name and country code of the + language in which the name was found, and `exact` is True iff this was an + exact match. This function currently ONLY does fuzzy matching if there are no exact matches. 
@@ -284,7 +309,7 @@ def lookup(input, valid_types=[], session=None, indices=None, exact_only=False): else: index, speller = open_index() - name = unicode(input).strip().lower() + name = normalize(input) exact = True form = None @@ -331,7 +356,9 @@ def lookup(input, valid_types=[], session=None, indices=None, exact_only=False): type_terms = [] for valid_type in valid_types: table_name = _parse_table_name(valid_type) - type_terms.append(whoosh.query.Term(u'table', table_name)) + if table_name: + # Quietly ignore bogus valid_types; more likely to DTRT + type_terms.append(whoosh.query.Term(u'table', table_name)) if type_terms: query = query & whoosh.query.Or(type_terms) @@ -376,11 +403,17 @@ def random_lookup(valid_types=[], session=None, indices=None): tables.append(indexed_tables[table_name]) if not tables: + # n.b.: It's possible we got a list of valid_types and none of them + # were valid, but this function is guaranteed to return *something*, so + # it politely selects from the entire index instead tables = indexed_tables.values() # Rather than create an array of many hundred items and pick randomly from # it, just pick a number up to the total number of potential items, then - # pick randomly from that, and partition the whole range into chunks + # pick randomly from that, and partition the whole range into chunks. + # This also avoids the slight problem that the index contains more rows + # (for languages) for some items than others. + # XXX ought to cache this (in the index?) if possible total = 0 partitions = [] for table in tables: @@ -413,7 +446,7 @@ def prefix_lookup(prefix, session=None, indices=None): else: index, speller = open_index() - query = whoosh.query.Prefix(u'name', prefix.lower()) + query = whoosh.query.Prefix(u'name', normalize(prefix)) searcher = index.searcher() searcher.weighting = LanguageWeighting()