Effects for the remaining B/W items. #247
diff --git a/pokedex/db/load.py b/pokedex/db/load.py
index d5502ea..5740f42 100644
--- a/pokedex/db/load.py
+++ b/pokedex/db/load.py
@@ -2,7 +2,6 @@
 import csv
 import fnmatch
 import os.path
-import pkg_resources
 import sys
 
 from sqlalchemy.orm.attributes import instrumentation_registry
@@ -11,6 +10,8 @@ import sqlalchemy.types
 
 from pokedex.db import metadata
 import pokedex.db.tables as tables
+from pokedex.defaults import get_default_csv_dir
+from pokedex.db.dependencies import find_dependent_tables
 
 
 def _get_table_names(metadata, patterns):
@@ -52,7 +53,7 @@ def _get_verbose_prints(verbose):
     def print_start(thing):
         # Truncate to 66 characters, leaving 10 characters for a success
         # or failure message
-        truncated_thing = thing[0:66]
+        truncated_thing = thing[:66]
 
         # Also, space-pad to keep the cursor in a known column
         num_spaces = 66 - len(truncated_thing)
@@ -95,7 +96,7 @@ def _get_verbose_prints(verbose):
     return print_start, print_status, print_done
 
 
-def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
+def load(session, tables=[], directory=None, drop_tables=False,
+         verbose=False, safe=True, recursive=False):
     """Load data from CSV files into the given database session.
 
     Tables are created automatically.
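
For orientation, the three printers returned just above are consumed in
start/status/done triples throughout load() and dump(); a rough sketch of
the calling pattern (the exact output format lives in the function bodies
this hunk only brushes against):

    print_start, print_status, print_done = _get_verbose_prints(verbose=True)
    print_start('Loading pokemon')  # pads the label to a known column
    print_status('42%')             # transient progress message
    print_done()                    # success (or failure) marker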
@@ -115,29 +116,49 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
     `verbose`
         If set to True, status messages will be printed to stdout.
+
+    `safe`
+        If set to False, loading can be faster, but the database can be
+        corrupted if the process crashes or is interrupted.
+
+    `recursive`
+        If set to True, tables that depend on the given tables are loaded
+        as well.
     """
 
     # First take care of verbosity
     print_start, print_status, print_done = _get_verbose_prints(verbose)
 
 
-    if not directory:
-        directory = pkg_resources.resource_filename('pokedex', 'data/csv')
+    if directory is None:
+        directory = get_default_csv_dir()
 
+    # XXX why isn't this done in command_load
     table_names = _get_table_names(metadata, tables)
     table_objs = [metadata.tables[name] for name in table_names]
+
+    if recursive:
+        table_objs.extend(find_dependent_tables(table_objs))
+
     table_objs = sqlalchemy.sql.util.sort_tables(table_objs)
 
+    # SQLite speed tweaks
+    if not safe and session.connection().dialect.name == 'sqlite':
+        session.connection().execute("PRAGMA synchronous=OFF")
+        session.connection().execute("PRAGMA journal_mode=OFF")
 
     # Drop all tables if requested
     if drop_tables:
         print_start('Dropping tables')
-        for table in reversed(table_objs):
+        for n, table in enumerate(reversed(table_objs), 1):
             table.drop(checkfirst=True)
+            print_status('%s/%s' % (n, len(table_objs)))
         print_done()
 
-    for table in table_objs:
+    print_start('Creating tables')
+    for n, table in enumerate(table_objs, 1):
         table.create()
+        print_status('%s/%s' % (n, len(table_objs)))
+    print_done()
     connection = session.connection()
 
     # Okay, run through the tables and actually load the data now
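
For context, a minimal sketch of driving the new signature; the engine URL
and table pattern below are illustrative, not part of this commit:

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    from pokedex.db.load import load

    session = sessionmaker(bind=create_engine('sqlite:///pokedex.sqlite'))()

    # safe=False turns on the dialect-specific fast paths added above;
    # recursive=True also pulls in tables dependent on the ones requested.
    load(session, tables=['pokemon*'], drop_tables=True,
         safe=False, recursive=True, verbose=True)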
@@ -160,26 +181,54 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
         reader = csv.reader(csvfile, lineterminator='\n')
         column_names = [unicode(column) for column in reader.next()]
 
+        if not safe and session.connection().dialect.name == 'postgresql':
+            # Postgres' CSV dialect works with our data if we mark the
+            # not-null columns with FORCE NOT NULL.
+            # COPY is only allowed for DB superusers.  If you're not one,
+            # use safe loading (pokedex load -S).
+            session.commit()
+            not_null_cols = [c for c in column_names if not table_obj.c[c].nullable]
+            if not_null_cols:
+                force_not_null = 'FORCE NOT NULL ' + ','.join('"%s"' % c for c in not_null_cols)
+            else:
+                force_not_null = ''
+            command = "COPY %(table_name)s (%(columns)s) FROM '%(csvpath)s' CSV HEADER %(force_not_null)s"
+            session.connection().execute(
+                command % dict(
+                    table_name=table_name,
+                    csvpath=csvpath,
+                    columns=','.join('"%s"' % c for c in column_names),
+                    force_not_null=force_not_null,
+                )
+            )
+            session.commit()
+            print_done()
+            continue
+
         # Self-referential tables may contain rows with foreign keys of other
         # rows in the same table that do not yet exist.  Pull these out and add
         # them to the session last
         # ASSUMPTION: Self-referential tables have a single PK called "id"
         deferred_rows = []  # ( row referring to id, [foreign ids we need] )
-        seen_ids = {}       # primary key we've seen => 1
+        seen_ids = set()    # primary keys we've seen
 
         # Fetch foreign key columns that point at this table, if any
         self_ref_columns = []
         for column in table_obj.c:
-            if any(_.references(table_obj) for _ in column.foreign_keys):
+            if any(x.references(table_obj) for x in column.foreign_keys):
                 self_ref_columns.append(column)
 
         new_rows = []
         def insert_and_commit():
+            if not new_rows:
+                return
             session.connection().execute(insert_stmt, new_rows)
             session.commit()
             new_rows[:] = []
 
-            progress = "{0}%".format(100 * csvfile.tell() // csvsize)
+            progress = "%d%%" % (100 * csvfile.tell() // csvsize)
             print_status(progress)
 
         for csvs in reader:
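
To make the fast path concrete: for a hypothetical two-column table whose
name column is NOT NULL, the interpolation above produces a statement like
the following (table name and CSV path are made up):

    column_names = ['id', 'name']
    not_null_cols = ['name']
    command = "COPY %(table_name)s (%(columns)s) FROM '%(csvpath)s' CSV HEADER %(force_not_null)s"
    print command % dict(
        table_name='berries',
        csvpath='/path/to/berries.csv',
        columns=','.join('"%s"' % c for c in column_names),
        force_not_null='FORCE NOT NULL ' + ','.join('"%s"' % c for c in not_null_cols),
    )
    # COPY berries ("id","name") FROM '/path/to/berries.csv' CSV HEADER FORCE NOT NULL "name"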
@@ -207,18 +256,18 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
             # May need to stash this row and add it later if it refers to a
             # later row in this table
             if self_ref_columns:
-                foreign_ids = [row_data[_.name] for _ in self_ref_columns]
-                foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids
+                foreign_ids = set(row_data[x.name] for x in self_ref_columns)
+                foreign_ids.discard(None)  # remove NULL ids
 
                 if not foreign_ids:
                     # NULL key.  Remember this row and add as usual.
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
-                elif all(_ in seen_ids for _ in foreign_ids):
+                elif foreign_ids.issubset(seen_ids):
                     # Non-NULL key we've already seen.  Remember it and commit
                     # so we know the old row exists when we add the new one
                     insert_and_commit()
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
                 else:
                     # Non-NULL future id.  Save this and insert it later!
@@ -237,7 +286,7 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
         # Attempt to add any spare rows we've collected
         for row_data, foreign_ids in deferred_rows:
-            if not all(_ in seen_ids for _ in foreign_ids):
+            if not foreign_ids.issubset(seen_ids):
                 # Could happen if row A refers to B which refers to C.
                 # This is ridiculous and doesn't happen in my data so far
                 raise ValueError("Too many levels of self-reference!  "
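
A minimal sketch of the deferral bookkeeping on made-up rows; parent_id
stands in for whatever self-referential foreign key column a table has:

    rows = [dict(id=1, parent_id=None),  # NULL key: inserted immediately
            dict(id=3, parent_id=5),     # refers ahead: deferred
            dict(id=5, parent_id=1)]     # refers back: inserted after commit

    seen_ids, deferred_rows = set(), []
    for row in rows:
        foreign_ids = set([row['parent_id']])
        foreign_ids.discard(None)
        if foreign_ids.issubset(seen_ids):
            seen_ids.add(row['id'])
        else:
            deferred_rows.append((row, foreign_ids))

    # id=5 made it into seen_ids, so the deferred id=3 row can now load.
    assert all(f.issubset(seen_ids) for _, f in deferred_rows)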
@@ -251,6 +300,10 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False):
 
         print_done()
 
+    # SQLite integrity check; a healthy database yields a single row
+    # reading 'ok'
+    if session.connection().dialect.name == 'sqlite':
+        result = session.connection().execute(
+            "PRAGMA integrity_check").fetchone()
+        if result[0] != 'ok':
+            raise RuntimeError("PRAGMA integrity_check failed: %s" % result[0])
+
 
 
 def dump(session, tables=[], directory=None, verbose=False):
@@ -276,7 +329,7 @@ def dump(session, tables=[], directory=None, verbose=False):
 
 
     if not directory:
-        directory = pkg_resources.resource_filename('pokedex', 'data/csv')
+        directory = get_default_csv_dir()
 
     table_names = _get_table_names(metadata, tables)
     table_names.sort()