diff --git a/pokedex/db/load.py b/pokedex/db/load.py
index e3e7fa5..5740f42 100644
--- a/pokedex/db/load.py
+++ b/pokedex/db/load.py
@@ -1,8 +1,7 @@
 """CSV to database or vice versa."""
 import csv
+import fnmatch
 import os.path
-import pkg_resources
-import re
 import sys
 
 from sqlalchemy.orm.attributes import instrumentation_registry
@@ -11,38 +10,28 @@ import sqlalchemy.types
 
 from pokedex.db import metadata
 import pokedex.db.tables as tables
+from pokedex.defaults import get_default_csv_dir
+from pokedex.db.dependencies import find_dependent_tables
 
 
-def _wildcard_char_to_regex(char):
-    """Converts a single wildcard character to the regex equivalent."""
-
-    if char == '?':
-        return '.?'
-    elif char == '*':
-        return '.*'
+def _get_table_names(metadata, patterns):
+    """Returns a list of table names from the given metadata.  If `patterns`
+    exists, only tables matching one of the patterns will be returned.
+    """
+    if patterns:
+        table_names = set()
+        for pattern in patterns:
+            if '.' in pattern or '/' in pattern:
+                # If it looks like a filename, pull out just the table name
+                _, filename = os.path.split(pattern)
+                table_name, _ = os.path.splitext(filename)
+                pattern = table_name
+
+            table_names.update(fnmatch.filter(metadata.tables.keys(), pattern))
     else:
-        return re.escape(char)
-
-def _wildcard_glob_to_regex(glob):
-    """Converts a single wildcard glob to a regex STRING."""
-
-    # If it looks like a filename, make it not one
-    if '.' in glob or '/' in glob:
-        _, filename = os.path.split(glob)
-        table_name, _ = os.path.splitext(filename)
-        glob = table_name
-
-    return u''.join(map(_wildcard_char_to_regex, glob))
-
-def _wildcards_to_regex(strings):
-    """Converts a list of wildcard globs to a single regex object."""
-
-    regex_parts = map(_wildcard_glob_to_regex, strings)
-
-    regex = '^(?:' + '|'.join(regex_parts) + ')$'
-
-    return re.compile(regex)
+        table_names = metadata.tables.keys()
+    return list(table_names)
 
 def _get_verbose_prints(verbose):
     """If `verbose` is true, returns three functions: one for printing a
@@ -64,7 +53,7 @@ def _get_verbose_prints(verbose):
     def print_start(thing):
         # Truncate to 66 characters, leaving 10 characters for a success
         # or failure message
-        truncated_thing = thing[0:66]
+        truncated_thing = thing[:66]
 
         # Also, space-pad to keep the cursor in a known column
         num_spaces = 66 - len(truncated_thing)
@@ -107,7 +96,7 @@ def _get_verbose_prints(verbose):
 
     return print_start, print_status, print_done
 
 
-def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
+def load(session, tables=[], directory=None, drop_tables=False, verbose=False, safe=True, recursive=False):
     """Load data from CSV files into the given database session.
 
     Tables are created automatically.
@@ -127,34 +116,49 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
     `verbose`
         If set to True, status messages will be printed to stdout.
+
+    `safe`
+        If set to False, load can be faster, but can corrupt the database if
+        it crashes or is interrupted.
+
+    `recursive`
+        If set to True, load all dependent tables too.
""" # First take care of verbosity print_start, print_status, print_done = _get_verbose_prints(verbose) - if not directory: - directory = pkg_resources.resource_filename('pokedex', 'data/csv') - - if tables: - regex = _wildcards_to_regex(tables) - table_names = filter(regex.match, metadata.tables.keys()) - else: - table_names = metadata.tables.keys() + if directory is None: + directory = get_default_csv_dir() + # XXX why isn't this done in command_load + table_names = _get_table_names(metadata, tables) table_objs = [metadata.tables[name] for name in table_names] + + if recursive: + table_objs.extend(find_dependent_tables(table_objs)) + table_objs = sqlalchemy.sql.util.sort_tables(table_objs) + # SQLite speed tweaks + if not safe and session.connection().dialect.name == 'sqlite': + session.connection().execute("PRAGMA synchronous=OFF") + session.connection().execute("PRAGMA journal_mode=OFF") # Drop all tables if requested if drop_tables: print_start('Dropping tables') - for table in reversed(table_objs): + for n, table in enumerate(reversed(table_objs)): table.drop(checkfirst=True) + print_status('%s/%s' % (n, len(table_objs))) print_done() - for table in table_objs: + print_start('Creating tables') + for n, table in enumerate(table_objs): table.create() + print_status('%s/%s' % (n, len(table_objs))) + print_done() connection = session.connection() # Okay, run through the tables and actually load the data now @@ -177,26 +181,54 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False): reader = csv.reader(csvfile, lineterminator='\n') column_names = [unicode(column) for column in reader.next()] + if not safe and session.connection().dialect.name == 'postgresql': + """ + Postgres' CSV dialect works with our data, if we mark the not-null + columns with FORCE NOT NULL. + COPY is only allowed for DB superusers. If you're not one, use safe + loading (pokedex load -S). + """ + session.commit() + not_null_cols = [c for c in column_names if not table_obj.c[c].nullable] + if not_null_cols: + force_not_null = 'FORCE NOT NULL ' + ','.join('"%s"' % c for c in not_null_cols) + else: + force_not_null = '' + command = "COPY %(table_name)s (%(columns)s) FROM '%(csvpath)s' CSV HEADER %(force_not_null)s" + session.connection().execute( + command % dict( + table_name=table_name, + csvpath=csvpath, + columns=','.join('"%s"' % c for c in column_names), + force_not_null=force_not_null, + ) + ) + session.commit() + print_done() + continue + # Self-referential tables may contain rows with foreign keys of other # rows in the same table that do not yet exist. 
         # ASSUMPTION: Self-referential tables have a single PK called "id"
         deferred_rows = []  # ( row referring to id, [foreign ids we need] )
-        seen_ids = {}       # primary key we've seen => 1
+        seen_ids = set()    # primary keys we've seen
 
         # Fetch foreign key columns that point at this table, if any
         self_ref_columns = []
         for column in table_obj.c:
-            if any(_.references(table_obj) for _ in column.foreign_keys):
+            if any(x.references(table_obj) for x in column.foreign_keys):
                 self_ref_columns.append(column)
 
         new_rows = []
         def insert_and_commit():
+            if not new_rows:
+                return
             session.connection().execute(insert_stmt, new_rows)
             session.commit()
             new_rows[:] = []
 
-            progress = "{0}%".format(100 * csvfile.tell() // csvsize)
+            progress = "%d%%" % (100 * csvfile.tell() // csvsize)
             print_status(progress)
 
         for csvs in reader:
@@ -224,18 +256,18 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
             # May need to stash this row and add it later if it refers to a
             # later row in this table
             if self_ref_columns:
-                foreign_ids = [row_data[_.name] for _ in self_ref_columns]
-                foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids
+                foreign_ids = set(row_data[x.name] for x in self_ref_columns)
+                foreign_ids.discard(None)  # remove NULL ids
 
                 if not foreign_ids:
                     # NULL key.  Remember this row and add as usual.
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
-                elif all(_ in seen_ids for _ in foreign_ids):
+                elif foreign_ids.issubset(seen_ids):
                     # Non-NULL key we've already seen.  Remember it and commit
                     # so we know the old row exists when we add the new one
                     insert_and_commit()
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
                 else:
                     # Non-NULL future id.  Save this and insert it later!
@@ -254,7 +286,7 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
         # Attempt to add any spare rows we've collected
         for row_data, foreign_ids in deferred_rows:
-            if not all(_ in seen_ids for _ in foreign_ids):
+            if not foreign_ids.issubset(seen_ids):
                 # Could happen if row A refers to B which refers to C.
                 # This is ridiculous and doesn't happen in my data so far
                 raise ValueError("Too many levels of self-reference!  "
@@ -268,6 +300,10 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
     print_done()
 
+    # SQLite check
+    if session.connection().dialect.name == 'sqlite':
+        session.connection().execute("PRAGMA integrity_check")
+
 
 def dump(session, tables=[], directory=None, verbose=False):
@@ -293,14 +329,9 @@ def dump(session, tables=[], directory=None, verbose=False):
 
     if not directory:
-        directory = pkg_resources.resource_filename('pokedex', 'data/csv')
-
-    if tables:
-        regex = _wildcards_to_regex(tables)
-        table_names = filter(regex.match, metadata.tables.keys())
-    else:
-        table_names = metadata.tables.keys()
+        directory = get_default_csv_dir()
 
+    table_names = _get_table_names(metadata, tables)
     table_names.sort()
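
A note on usage: the sketch below shows one way the reworked load() might be driven once this commit is applied. The pokedex imports and the load() signature are taken from the diff itself; the engine URL and the table pattern are invented placeholders, and real callers normally go through the pokedex CLI rather than calling load() directly.

    # Hypothetical driver for the post-commit load(); the sqlite URL and the
    # 'pokemon*' pattern are placeholders, not values from the diff.
    import sqlalchemy
    from sqlalchemy.orm import sessionmaker

    from pokedex.db import load as loader

    engine = sqlalchemy.create_engine('sqlite:///pokedex.sqlite')
    session = sessionmaker(bind=engine)()

    # `tables` accepts fnmatch-style patterns or CSV filenames (see
    # _get_table_names); recursive=True pulls in dependent tables via
    # find_dependent_tables; safe=False enables the PRAGMA/COPY fast paths.
    loader.load(session, tables=['pokemon*'], drop_tables=True,
                verbose=True, safe=False, recursive=True)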
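
The first hunk's switch from hand-rolled regexes to fnmatch is easiest to verify in isolation. The sketch below reproduces the matching behaviour of _get_table_names() on a handful of invented table names; in the real code the list comes from metadata.tables.keys().

    import fnmatch
    import os.path

    # Invented table names; the real list is metadata.tables.keys().
    table_names = ['pokemon', 'pokemon_abilities', 'moves', 'types']

    def match_patterns(patterns):
        matched = set()
        for pattern in patterns:
            if '.' in pattern or '/' in pattern:
                # Reduce a filename like 'data/csv/moves.csv' to 'moves',
                # mirroring the split/splitext step in _get_table_names.
                _, filename = os.path.split(pattern)
                pattern, _ = os.path.splitext(filename)
            matched.update(fnmatch.filter(table_names, pattern))
        return sorted(matched)

    assert match_patterns(['pokemon*']) == ['pokemon', 'pokemon_abilities']
    assert match_patterns(['data/csv/moves.csv']) == ['moves']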
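
The deferred-row bookkeeping for self-referential tables is the subtlest part of the loader. The toy version below runs the same algorithm under the diff's stated assumption of a single primary key column named "id", on invented rows; a single retry pass suffices because the loader only supports one level of self-reference, as its ValueError branch notes.

    # Toy rows of a self-referential table: (id, parent_id), where parent_id
    # may point at a row that appears later in the file.
    rows = [(1, None), (2, 5), (3, 1), (5, None)]

    seen_ids = set()
    deferred_rows = []  # (row, foreign ids it still needs)

    for row in rows:
        row_id, parent_id = row
        foreign_ids = set([parent_id])
        foreign_ids.discard(None)  # NULL keys need nothing
        if foreign_ids.issubset(seen_ids):
            # Nothing missing: "insert" immediately, as the loader does.
            seen_ids.add(row_id)
        else:
            # Refers to an id we haven't seen yet; save it for later.
            deferred_rows.append((row, foreign_ids))

    # Single retry pass, matching the loader's one level of self-reference.
    for row, foreign_ids in deferred_rows:
        if not foreign_ids.issubset(seen_ids):
            raise ValueError("Too many levels of self-reference!")
        seen_ids.add(row[0])

    assert seen_ids == set([1, 2, 3, 5])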