import random
import re
import shutil
+import unicodedata
from sqlalchemy.sql import func
import whoosh
]:
indexed_tables[cls.__tablename__] = cls
def normalize(name):
    """Strip irrelevant formatting junk from a name for indexing.

    Specifically, the result is lowercased, whitespace-trimmed, and has
    accent marks removed, so lookups are insensitive to case and diacritics.
    """
    # http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
    # Decompose by Unicode compatibility rules (NFKD), drop combining marks
    # (category 'Mn'), then recompose (NFC).  Korean characters decompose
    # into letters rather than combining marks, so filtering on 'Mn' leaves
    # them intact and the final NFC pass reassembles them correctly.
    decomposed = unicodedata.normalize('NFKD', unicode(name))
    stripped = u"".join(
        ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
    recomposed = unicodedata.normalize('NFC', stripped)

    return recomposed.strip().lower()
+
+
def open_index(directory=None, session=None, recreate=False):
"""Opens the whoosh index stored in the named directory and returns (index,
speller). If the index doesn't already exist, it will be created.
table=whoosh.fields.ID(stored=True),
row_id=whoosh.fields.ID(stored=True),
language=whoosh.fields.STORED,
+ iso3166=whoosh.fields.STORED,
display_name=whoosh.fields.STORED, # non-lowercased name
forme_name=whoosh.fields.ID,
)
row_id=unicode(row.id),
forme_name=u'XXX')
- def add(name, language, score):
- writer.add_document(name=name.lower(), display_name=name,
+ def add(name, language, iso3166, score):
+ normalized_name = normalize(name)
+ writer.add_document(name=normalized_name, display_name=name,
language=language,
+ iso3166=iso3166,
**row_key)
- speller_entries.append((name.lower(), score))
+ speller_entries.append((normalized_name, score))
# If this is a form, mark it as such
if getattr(row, 'forme_base_pokemon_id', None):
row_key['forme_name'] = row.forme_name
name = row.name
- add(name, None, 1)
+ add(name, None, u'us', 1)
# Pokemon also get other languages
for foreign_name in getattr(row, 'foreign_names', []):
# no point and it makes spell results confusing
continue
- add(moonspeak, foreign_name.language.name, 3)
+ add(moonspeak, foreign_name.language.name,
+ foreign_name.language.iso3166,
+ 3)
# Add Roomaji too
if foreign_name.language.name == 'Japanese':
roomaji = romanize(foreign_name.name)
- add(roomaji, u'Roomaji', 8)
+ add(roomaji, u'Roomaji', u'jp', 8)
writer.commit()
rx_is_number = re.compile('^\d+$')
LookupResult = namedtuple('LookupResult',
- ['object', 'name', 'language', 'exact'])
+ ['object', 'name', 'language', 'iso3166', 'exact'])
def _parse_table_name(name):
"""Takes a singular table name, table name, or table object and returns the
# Bogus. Be nice and return dummy
return None
def _whoosh_records_to_results(records, session, exact=True):
    """Converts a list of whoosh's indexed records to LookupResult tuples
    containing database objects.

    Each record's 'table' and 'row_id' fields identify the database row,
    which is fetched through `session`.  Duplicate (table, row_id) pairs are
    dropped, keeping only the first occurrence.  `exact` is passed through
    to every LookupResult unchanged.
    """
    # XXX this 'exact' thing is getting kinda leaky.  would like a better way
    # to handle it, since only lookup() cares about fuzzy results
    seen = set()  # (table, row_id) pairs already emitted; was a dict-as-set
    results = []
    for record in records:
        # Skip dupes
        seen_key = record['table'], record['row_id']
        if seen_key in seen:
            continue
        seen.add(seen_key)

        cls = indexed_tables[record['table']]
        obj = session.query(cls).get(record['row_id'])

        results.append(LookupResult(object=obj,
                                    name=record['display_name'],
                                    language=record['language'],
                                    iso3166=record['iso3166'],
                                    exact=exact))

    return results
+
def lookup(input, valid_types=[], session=None, indices=None, exact_only=False):
"""Attempts to find some sort of object, given a database session and name.
- Returns a list of named (object, name, language, exact) tuples. `object`
- is a database object, `name` is the name under which the object was found,
- `language` is the name of the language in which the name was found, and
- `exact` is True iff this was an exact match.
+ Returns a list of named (object, name, language, iso3166, exact) tuples.
+ `object` is a database object, `name` is the name under which the object
+ was found, `language` and `iso3166` are the name and country code of the
+ language in which the name was found, and `exact` is True iff this was an
+ exact match.
This function currently ONLY does fuzzy matching if there are no exact
matches.
else:
index, speller = open_index()
- name = unicode(input).strip().lower()
+ name = normalize(input)
exact = True
form = None
type_terms = []
for valid_type in valid_types:
table_name = _parse_table_name(valid_type)
- type_terms.append(whoosh.query.Term(u'table', table_name))
+ if table_name:
+ # Quietly ignore bogus valid_types; more likely to DTRT
+ type_terms.append(whoosh.query.Term(u'table', table_name))
if type_terms:
query = query & whoosh.query.Or(type_terms)
results.extend(searcher.search(query))
### Convert results to db objects
- objects = []
- seen = {}
- for result in results:
- # Skip dupe results
- seen_key = result['table'], result['row_id']
- if seen_key in seen:
- continue
- seen[seen_key] = True
-
- cls = indexed_tables[result['table']]
- obj = session.query(cls).get(result['row_id'])
-
- objects.append(LookupResult(object=obj,
- name=result['display_name'],
- language=result['language'],
- exact=exact))
+ objects = _whoosh_records_to_results(results, session, exact=exact)
# Only return up to 10 matches; beyond that, something is wrong.
# We strip out duplicate entries above, so it's remotely possible that we
tables.append(indexed_tables[table_name])
if not tables:
+ # n.b.: It's possible we got a list of valid_types and none of them
+ # were valid, but this function is guaranteed to return *something*, so
+ # it politely selects from the entire index instead
tables = indexed_tables.values()
# Rather than create an array of many hundred items and pick randomly from
# it, just pick a number up to the total number of potential items, then
- # pick randomly from that, and partition the whole range into chunks
+ # pick randomly from that, and partition the whole range into chunks.
+ # This also avoids the slight problem that the index contains more rows
+ # (for languages) for some items than others.
+ # XXX ought to cache this (in the index?) if possible
total = 0
partitions = []
for table in tables:
return lookup(unicode(n), valid_types=[ partitions[0][0] ],
indices=indices, session=session)
+
def prefix_lookup(prefix, session=None, indices=None):
    """Returns terms starting with the given exact prefix.

    No special magic is currently done with the name; type prefixes are not
    recognized.

    `session` and `indices` are treated as with `lookup()`.
    """

    # Fall back to a fresh connection / freshly-opened index when the caller
    # didn't supply them, exactly as lookup() does.
    session = session or connect()
    index, speller = indices if indices else open_index()

    # Prefix-match against the normalized (lowercased, de-accented) name
    # field, so the caller's prefix is normalized the same way.
    prefix_query = whoosh.query.Prefix(u'name', normalize(prefix))

    searcher = index.searcher()
    searcher.weighting = LanguageWeighting()
    matches = searcher.search(prefix_query)  # XXX , limit=MAX_LOOKUP_RESULTS)

    return _whoosh_records_to_results(matches, session)