Lookup no longer crashes if all type prefixes are bogus.
diff --git a/pokedex/db/load.py b/pokedex/db/load.py
index 1cff39d..2b14883 100644
--- a/pokedex/db/load.py
+++ b/pokedex/db/load.py
@@ -10,7 +10,41 @@ from pokedex.db import metadata
 import pokedex.db.tables as tables
 
 
-def load(session, directory=None, drop_tables=False):
+def _get_verbose_prints(verbose):
+    """If `verbose` is true, returns two functions: one for printing a starting
+    message, and the other for printing a success or failure message when
+    finished.
+
+    If `verbose` is false, returns two no-op functions.
+    """
+
+    if verbose:
+        import sys
+        def print_start(thing):
+            # Truncate to 66 characters so the trailing "..." and a short
+            # success or failure message still fit on an 80-column line
+            truncated_thing = thing[0:66]
+
+            # Also, space-pad to keep the cursor in a known column
+            num_spaces = 66 - len(truncated_thing)
+
+            print "%s...%s" % (truncated_thing, ' ' * num_spaces),
+            sys.stdout.flush()
+
+        def print_done(msg='ok'):
+            print msg
+            sys.stdout.flush()
+
+        return print_start, print_done
+
+    # Not verbose; return dummies
+    def dummy(*args, **kwargs):
+        pass
+
+    return dummy, dummy
+
+
+def load(session, directory=None, drop_tables=False, verbose=False):
     """Load data from CSV files into the given database session.
 
     Tables are created automatically.
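For reference, a minimal sketch of how the new helper behaves on its own
(Python 2, matching the surrounding code; the 'pokemon' label is just an
example):

```python
# Sketch only: exercising _get_verbose_prints outside the loader.
from pokedex.db.load import _get_verbose_prints

print_start, print_done = _get_verbose_prints(True)
print_start('pokemon')   # prints "pokemon...", space-padded to a fixed column
print_done()             # prints "ok" on the same line

print_start, print_done = _get_verbose_prints(False)
print_start('anything')  # both are no-ops now
print_done()
```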
@@ -24,55 +58,39 @@ def load(session, directory=None, drop_tables=False):
 
     `drop_tables`
         If set to True, existing `pokedex`-related tables will be dropped.
+
+    `verbose`
+        If set to True, status messages will be printed to stdout.
     """
 
+    # First take care of verbosity
+    print_start, print_done = _get_verbose_prints(verbose)
+
     if not directory:
         directory = pkg_resources.resource_filename('pokedex', 'data/csv')
 
     # Drop all tables if requested
-    if options.drop_tables:
-        print 'Dropping tables...'
+    if drop_tables:
+        print_start('Dropping tables')
         metadata.drop_all()
+        print_done()
 
     metadata.create_all()
-
-    # SQLAlchemy is retarded and there is no way for me to get a list of ORM
-    # classes besides to inspect the module they all happen to live in for
-    # things that look right.
-    table_base = tables.TableBase
-    orm_classes = {}  # table object => table class
-
-    for name in dir(tables):
-        # dir() returns strings!  How /convenient/.
-        thingy = getattr(tables, name)
-
-        if not isinstance(thingy, type):
-            # Not a class; bail
-            continue
-        elif not issubclass(thingy, table_base):
-            # Not a declarative table; bail
-            continue
-        elif thingy == table_base:
-            # Declarative table base, so not a real table; bail
-            continue
-
-        # thingy is definitely a table class!  Hallelujah.
-        orm_classes[thingy.__table__] = thingy
 
     # Okay, run through the tables and actually load the data now
     for table_obj in metadata.sorted_tables:
-        table_class = orm_classes[table_obj]
         table_name = table_obj.name
+        insert_stmt = table_obj.insert()
 
-        # Print the table name but leave the cursor in a fixed column
-        print table_name + '...', ' ' * (40 - len(table_name)),
-        sys.stdout.flush()
+        print_start(table_name)
 
         try:
             csvfile = open("%s/%s.csv" % (directory, table_name), 'rb')
         except IOError:
             # File doesn't exist; don't load anything!
-            print 'no data!'
+            print_done('missing?')
             continue
 
         reader = csv.reader(csvfile, lineterminator='\n')
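Dropping the ORM classes in favour of table-level insert() constructs is what
enables bulk loading: old-style SQLAlchemy connections perform an executemany
when execute() is handed a list of parameter dicts. A standalone sketch, with
a made-up `critters` table standing in for the real schema:

```python
# Sketch of the executemany pattern the loader now relies on
# (legacy SQLAlchemy idioms, matching the era of this code).
from sqlalchemy import MetaData, Table, Column, Integer, Unicode, create_engine

engine = create_engine('sqlite://')
metadata = MetaData()
critters = Table('critters', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', Unicode(16)),
)
metadata.create_all(engine)

insert_stmt = critters.insert()
rows = [{'id': 1, 'name': u'Bulbasaur'}, {'id': 2, 'name': u'Ivysaur'}]
engine.connect().execute(insert_stmt, rows)  # one round trip, not two
```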
@@ -91,8 +109,14 @@ def load(session, directory=None, drop_tables=False):
             if any(_.references(table_obj) for _ in column.foreign_keys):
                 self_ref_columns.append(column)
 
+        new_rows = []
+        def insert_and_commit():
+            # Insert every buffered row with a single executemany, then
+            # commit so rows don't accumulate in one huge transaction
+            session.connection().execute(insert_stmt, new_rows)
+            session.commit()
+            new_rows[:] = []  # clear in place; a closure can't rebind new_rows
+
         for csvs in reader:
-            row = table_class()
+            row_data = {}
 
             for column_name, value in zip(column_names, csvs):
                 column = table_obj.c[column_name]
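A note on `insert_and_commit` above: it clears its buffer with slice
assignment because Python 2 closures cannot rebind outer names; a plain
`new_rows = []` inside the function would make the name local and raise
UnboundLocalError at the execute call. A tiny self-contained illustration of
the same pattern:

```python
# Why the buffer is cleared with new_rows[:] = [] rather than rebound:
# Python 2 has no `nonlocal`, so assignment inside the closure would
# make the name local and break it.
def make_flusher(flushed):
    buf = []
    def flush():
        flushed.extend(buf)
        buf[:] = []  # mutate the shared list in place
    return buf, flush

flushed = []
buf, flush = make_flusher(flushed)
buf.extend([1, 2, 3])
flush()
assert buf == [] and flushed == [1, 2, 3]
```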
@@ -110,50 +134,59 @@ def load(session, directory=None, drop_tables=False):
                     # Otherwise, unflatten from bytes
                     value = value.decode('utf-8')
 
-                setattr(row, column_name, value)
+                # nb: Dictionaries flattened with ** have to have string keys
+                row_data[str(column_name)] = value
 
             # May need to stash this row and add it later if it refers to a
             # later row in this table
             if self_ref_columns:
-                foreign_ids = [getattr(row, _.name) for _ in self_ref_columns]
+                foreign_ids = [row_data[_.name] for _ in self_ref_columns]
                 foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids
 
                 if not foreign_ids:
                     # NULL key.  Remember this row and add as usual.
-                    seen_ids[row.id] = 1
+                    seen_ids[row_data['id']] = 1
 
                 elif all(_ in seen_ids for _ in foreign_ids):
                     # Non-NULL key we've already seen.  Remember it and commit
                     # so we know the old row exists when we add the new one
-                    session.commit()
-                    seen_ids[row.id] = 1
+                    insert_and_commit()
+                    seen_ids[row_data['id']] = 1
 
                 else:
                     # Non-NULL future id.  Save this and insert it later!
-                    deferred_rows.append((row, foreign_ids))
+                    deferred_rows.append((row_data, foreign_ids))
                     continue
 
-            session.add(row)
+            # Insert row!
+            new_rows.append(row_data)
 
-        session.commit()
+            # Buffering some zillion rows in memory consumes a lot of RAM.
+            # Let's not do that.  Insert and commit every 1000 rows
+            if len(new_rows) > 1000:
+                insert_and_commit()
+
+        insert_and_commit()
 
         # Attempt to add any spare rows we've collected
-        for row, foreign_ids in deferred_rows:
+        for row_data, foreign_ids in deferred_rows:
             if not all(_ in seen_ids for _ in foreign_ids):
                 # Could happen if row A refers to B which refers to C.
                 # This is ridiculous and doesn't happen in my data so far
                 raise ValueError("Too many levels of self-reference!  "
-                                 "Row was: " + str(row.__dict__))
+                                 "Row was: " + str(row))
 
-            session.add(row)
-            seen_ids[row.id] = 1
-            session.commit()
+            session.connection().execute(
+                insert_stmt.values(**row_data)
+            )
+            seen_ids[row_data['id']] = 1
+        session.commit()
 
-        print 'loaded'
+        print_done()
 
 
 
-def dump(session, directory=None):
+def dump(session, directory=None, verbose=False):
     """Dumps the contents of a database to a set of CSV files.  Probably not
     useful to anyone besides a developer.
 
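Stepping back to the deferred-row handling in the previous hunk: it copes with
self-referential tables whose CSV rows may point at rows that appear later in
the same file. A compact sketch of the same one-pass-plus-retry logic, with
hypothetical (id, self_ref_id) pairs:

```python
# Sketch of the deferral scheme above; ids here are made up.
rows = [(1, 2), (2, None), (3, 1)]  # row 1 points forward at row 2
seen_ids, deferred, inserted = {}, [], []

for rid, ref in rows:
    if ref is not None and ref not in seen_ids:
        deferred.append((rid, ref))  # forward reference; retry later
        continue
    inserted.append(rid)
    seen_ids[rid] = 1

for rid, ref in deferred:
    if ref not in seen_ids:
        # mirrors the ValueError above: A refers to B refers to C
        raise ValueError("Too many levels of self-reference!")
    inserted.append(rid)
    seen_ids[rid] = 1

assert inserted == [2, 1, 3]
```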
@@ -163,13 +196,20 @@ def dump(session, directory=None):
     `directory`
         Directory the CSV files should be put in.  Defaults to the `pokedex`
         data directory.
+
+    `verbose`
+        If set to True, status messages will be printed to stdout.
     """
 
+    # First take care of verbosity
+    print_start, print_done = _get_verbose_prints(verbose)
+
     if not directory:
         directory = pkg_resources.resource_filename('pokedex', 'data/csv')
 
     for table_name in sorted(metadata.tables.keys()):
-        print table_name
+        print_start(table_name)
         table = metadata.tables[table_name]
 
         writer = csv.writer(open("%s/%s.csv" % (directory, table_name), 'wb'),
@@ -195,3 +235,5 @@ def dump(session, directory=None):
                 csvs.append(val)
 
             writer.writerow(csvs)
+
+        print_done()
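
With these changes the two public entry points both take a `verbose` flag. A
hedged driver sketch; the SQLite URI and sessionmaker wiring here are
assumptions, not part of this patch:

```python
# Assumed setup: how the session gets built is outside this patch.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from pokedex.db import load as loader

engine = create_engine('sqlite:///pokedex.sqlite')  # hypothetical URI
session = sessionmaker(bind=engine)()

loader.load(session, drop_tables=True, verbose=True)  # per-table status lines
loader.dump(session, verbose=True)                    # write tables back to CSV
```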