"""CSV to database or vice versa."""
import csv
+import fnmatch
import os.path
-import pkg_resources
-import re
import sys
from sqlalchemy.orm.attributes import instrumentation_registry
from pokedex.db import metadata
import pokedex.db.tables as tables
+from pokedex.defaults import get_default_csv_dir
+from pokedex.db.dependencies import find_dependent_tables
-def _wildcard_char_to_regex(char):
- """Converts a single wildcard character to the regex equivalent."""
-
- if char == '?':
- return '.?'
- elif char == '*':
- return '.*'
+def _get_table_names(metadata, patterns):
+    """Returns a list of table names from the given metadata. If `patterns`
+    is given, only tables matching at least one of the patterns are returned.
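+
+    For example (hypothetical patterns; actual names depend on the schema):
+
+        _get_table_names(metadata, ['pokemon*', 'data/csv/items.csv'])
+
+    matches every table whose name starts with 'pokemon', plus 'items',
+    because filename-like patterns are first reduced to their stem.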
+ """
+ if patterns:
+ table_names = set()
+ for pattern in patterns:
+ if '.' in pattern or '/' in pattern:
+ # If it looks like a filename, pull out just the table name
+ _, filename = os.path.split(pattern)
+ table_name, _ = os.path.splitext(filename)
+ pattern = table_name
+
+ table_names.update(fnmatch.filter(metadata.tables.keys(), pattern))
else:
- return re.escape(char)
-
-def _wildcard_glob_to_regex(glob):
- """Converts a single wildcard glob to a regex STRING."""
-
- # If it looks like a filename, make it not one
- if '.' in glob or '/' in glob:
- _, filename = os.path.split(glob)
- table_name, _ = os.path.splitext(filename)
- glob = table_name
-
- return u''.join(map(_wildcard_char_to_regex, glob))
-
-def _wildcards_to_regex(strings):
- """Converts a list of wildcard globs to a single regex object."""
-
- regex_parts = map(_wildcard_glob_to_regex, strings)
-
- regex = '^(?:' + '|'.join(regex_parts) + ')$'
-
- return re.compile(regex)
+ table_names = metadata.tables.keys()
+ return list(table_names)
def _get_verbose_prints(verbose):
"""If `verbose` is true, returns three functions: one for printing a
def print_start(thing):
# Truncate to 66 characters, leaving 10 characters for a success
# or failure message
- truncated_thing = thing[0:66]
+ truncated_thing = thing[:66]
# Also, space-pad to keep the cursor in a known column
num_spaces = 66 - len(truncated_thing)
return print_start, print_status, print_done
-def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
+def load(session, tables=[], directory=None, drop_tables=False, verbose=False, safe=True, recursive=False):
"""Load data from CSV files into the given database session.
Tables are created automatically.
`verbose`
If set to True, status messages will be printed to stdout.
+
+ `safe`
+        If set to False, loading is faster, but a crash or interruption
+        partway through can corrupt the database.
+
+ `recursive`
+ If set to True, load all dependent tables too.
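+
+    A typical call might look like this (a sketch; `session` is assumed to
+    be a SQLAlchemy session bound to the target database):
+
+        load(session, tables=['pokemon*'], drop_tables=True, recursive=True)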
"""
# First take care of verbosity
print_start, print_status, print_done = _get_verbose_prints(verbose)
- if not directory:
- directory = pkg_resources.resource_filename('pokedex', 'data/csv')
-
- if tables:
- regex = _wildcards_to_regex(tables)
- table_names = filter(regex.match, metadata.tables.keys())
- else:
- table_names = metadata.tables.keys()
+ if directory is None:
+ directory = get_default_csv_dir()
+ # XXX why isn't this done in command_load
+ table_names = _get_table_names(metadata, tables)
table_objs = [metadata.tables[name] for name in table_names]
+
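+    # If requested, pull in the extra tables that find_dependent_tables()
+    # reports (see pokedex.db.dependencies); they are sorted together with
+    # the requested tables below.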
+ if recursive:
+ table_objs.extend(find_dependent_tables(table_objs))
+
table_objs = sqlalchemy.sql.util.sort_tables(table_objs)
+ # SQLite speed tweaks
+ if not safe and session.connection().dialect.name == 'sqlite':
+ session.connection().execute("PRAGMA synchronous=OFF")
+ session.connection().execute("PRAGMA journal_mode=OFF")
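+        # (synchronous=OFF skips fsync() after writes, and journal_mode=OFF
+        # disables the rollback journal entirely; this is what makes an
+        # interrupted unsafe load able to corrupt the database.)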
# Drop all tables if requested
if drop_tables:
print_start('Dropping tables')
- for table in reversed(table_objs):
+ for n, table in enumerate(reversed(table_objs)):
table.drop(checkfirst=True)
+            print_status('%s/%s' % (n + 1, len(table_objs)))
print_done()
- for table in table_objs:
+ print_start('Creating tables')
+ for n, table in enumerate(table_objs):
table.create()
+        print_status('%s/%s' % (n + 1, len(table_objs)))
+ print_done()
connection = session.connection()
# Okay, run through the tables and actually load the data now
reader = csv.reader(csvfile, lineterminator='\n')
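+        # The first CSV row is a header naming the columns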
column_names = [unicode(column) for column in reader.next()]
+ if not safe and session.connection().dialect.name == 'postgresql':
+            # Postgres' CSV dialect works with our data, if we mark the
+            # not-null columns with FORCE NOT NULL.
+            # COPY is only allowed for DB superusers.  If you're not one,
+            # use safe loading (pokedex load -S).
+ session.commit()
+ not_null_cols = [c for c in column_names if not table_obj.c[c].nullable]
+ if not_null_cols:
+ force_not_null = 'FORCE NOT NULL ' + ','.join('"%s"' % c for c in not_null_cols)
+ else:
+ force_not_null = ''
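+            # The generated statement looks roughly like this (hypothetical
+            # table and paths):
+            #   COPY items ("id","name") FROM '/path/to/items.csv'
+            #       CSV HEADER FORCE NOT NULL "id","name"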
+ command = "COPY %(table_name)s (%(columns)s) FROM '%(csvpath)s' CSV HEADER %(force_not_null)s"
+ session.connection().execute(
+ command % dict(
+ table_name=table_name,
+ csvpath=csvpath,
+ columns=','.join('"%s"' % c for c in column_names),
+ force_not_null=force_not_null,
+ )
+ )
+ session.commit()
+ print_done()
+ continue
+
# Self-referential tables may contain rows with foreign keys of other
# rows in the same table that do not yet exist. Pull these out and add
# them to the session last
# ASSUMPTION: Self-referential tables have a single PK called "id"
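+        # (e.g. a row whose foreign key points at a "parent" row in the same
+        # table must wait until that parent row has actually been inserted.)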
deferred_rows = [] # ( row referring to id, [foreign ids we need] )
- seen_ids = {} # primary key we've seen => 1
+ seen_ids = set() # primary keys we've seen
# Fetch foreign key columns that point at this table, if any
self_ref_columns = []
for column in table_obj.c:
- if any(_.references(table_obj) for _ in column.foreign_keys):
+ if any(x.references(table_obj) for x in column.foreign_keys):
self_ref_columns.append(column)
new_rows = []
def insert_and_commit():
+ if not new_rows:
+ return
session.connection().execute(insert_stmt, new_rows)
session.commit()
new_rows[:] = []
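+            # The slice assignment empties the shared list in place;
+            # rebinding new_rows would not work from this nested scope.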
- progress = "{0}%".format(100 * csvfile.tell() // csvsize)
+ progress = "%d%%" % (100 * csvfile.tell() // csvsize)
print_status(progress)
for csvs in reader:
# May need to stash this row and add it later if it refers to a
# later row in this table
if self_ref_columns:
- foreign_ids = [row_data[_.name] for _ in self_ref_columns]
- foreign_ids = [_ for _ in foreign_ids if _] # remove NULL ids
+ foreign_ids = set(row_data[x.name] for x in self_ref_columns)
+ foreign_ids.discard(None) # remove NULL ids
if not foreign_ids:
# NULL key. Remember this row and add as usual.
- seen_ids[row_data['id']] = 1
+ seen_ids.add(row_data['id'])
- elif all(_ in seen_ids for _ in foreign_ids):
+ elif foreign_ids.issubset(seen_ids):
# Non-NULL key we've already seen. Remember it and commit
# so we know the old row exists when we add the new one
insert_and_commit()
- seen_ids[row_data['id']] = 1
+ seen_ids.add(row_data['id'])
else:
# Non-NULL future id. Save this and insert it later!
# Attempt to add any spare rows we've collected
for row_data, foreign_ids in deferred_rows:
- if not all(_ in seen_ids for _ in foreign_ids):
+ if not foreign_ids.issubset(seen_ids):
# Could happen if row A refers to B which refers to C.
# This is ridiculous and doesn't happen in my data so far
raise ValueError("Too many levels of self-reference! "
print_done()
+    # SQLite check
+    if session.connection().dialect.name == 'sqlite':
+        result = session.connection().execute("PRAGMA integrity_check").fetchone()
+        if result[0] != u'ok':
+            raise ValueError("SQLite integrity check failed: %s" % result[0])
+
def dump(session, tables=[], directory=None, verbose=False):
if not directory:
- directory = pkg_resources.resource_filename('pokedex', 'data/csv')
-
- if tables:
- regex = _wildcards_to_regex(tables)
- table_names = filter(regex.match, metadata.tables.keys())
- else:
- table_names = metadata.tables.keys()
+ directory = get_default_csv_dir()
+ table_names = _get_table_names(metadata, tables)
table_names.sort()