Load translations in pokedex load.
[zzz-pokedex.git] pokedex/db/load.py
index 88f5332..c77064f 100644
@@ -8,8 +8,8 @@ from sqlalchemy.orm.attributes import instrumentation_registry
 import sqlalchemy.sql.util
 import sqlalchemy.types
 
-from pokedex.db import metadata
-import pokedex.db.tables as tables
+import pokedex
+from pokedex.db import metadata, tables, translations
 from pokedex.defaults import get_default_csv_dir
 from pokedex.db.dependencies import find_dependent_tables
 
@@ -53,7 +53,7 @@ def _get_verbose_prints(verbose):
     def print_start(thing):
         # Truncate to 66 characters, leaving 10 characters for a success
         # or failure message
-        truncated_thing = thing[0:66]
+        truncated_thing = thing[:66]
 
         # Also, space-pad to keep the cursor in a known column
         num_spaces = 66 - len(truncated_thing)
@@ -96,7 +96,7 @@ def _get_verbose_prints(verbose):
     return print_start, print_status, print_done
 
 
-def load(session, tables=[], directory=None, drop_tables=False, verbose=False, safe=True, recursive=False):
+def load(session, tables=[], directory=None, drop_tables=False, verbose=False, safe=True, recursive=True, langs=None):
     """Load data from CSV files into the given database session.
 
     Tables are created automatically.
@@ -123,6 +123,9 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False, s
 
     `recursive`
         If set to True, load all dependent tables too.
+
+    `langs`
+        List of identifiers of extra languages to load, or None to load them all.
     """
 
     # First take care of verbosity
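
For orientation, a minimal usage sketch of the new load() signature (the database URI, table name, and language identifiers below are illustrative, and the pokedex.db.connect() helper is assumed to return a SQLAlchemy session):

    import pokedex.db
    from pokedex.db.load import load

    # Assumed helper; substitute however you normally obtain a session.
    session = pokedex.db.connect('sqlite:///pokedex.sqlite')

    # Load the pokemon table plus everything it depends on (recursive now
    # defaults to True), along with French and German translation rows.
    load(session,
         tables=['pokemon'],
         drop_tables=True,
         verbose=True,
         langs=['fr', 'de'])
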
@@ -212,12 +215,12 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False, s
         # them to the session last
         # ASSUMPTION: Self-referential tables have a single PK called "id"
         deferred_rows = []  # ( row referring to id, [foreign ids we need] )
-        seen_ids = {}       # primary key we've seen => 1
+        seen_ids = set()    # primary keys we've seen
 
         # Fetch foreign key columns that point at this table, if any
         self_ref_columns = []
         for column in table_obj.c:
-            if any(_.references(table_obj) for _ in column.foreign_keys):
+            if any(x.references(table_obj) for x in column.foreign_keys):
                 self_ref_columns.append(column)
 
         new_rows = []
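
As an aside, the self-reference detection above leans on SQLAlchemy's ForeignKey.references(); a tiny standalone sketch of the same check on a made-up table:

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

    meta = MetaData()
    evolution = Table('example_evolution', meta,
                      Column('id', Integer, primary_key=True),
                      Column('evolves_from_id', Integer,
                             ForeignKey('example_evolution.id')))

    self_ref_columns = [col for col in evolution.c
                        if any(fk.references(evolution)
                               for fk in col.foreign_keys)]
    # -> [evolution.c.evolves_from_id]
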
@@ -256,18 +259,18 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False, s
             # May need to stash this row and add it later if it refers to a
             # later row in this table
             if self_ref_columns:
-                foreign_ids = [row_data[_.name] for _ in self_ref_columns]
-                foreign_ids = [_ for _ in foreign_ids if _]  # remove NULL ids
+                foreign_ids = set(row_data[x.name] for x in self_ref_columns)
+                foreign_ids.discard(None)  # remove NULL ids
 
                 if not foreign_ids:
                     # NULL key.  Remember this row and add as usual.
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
-                elif all(_ in seen_ids for _ in foreign_ids):
+                elif foreign_ids.issubset(seen_ids):
                     # Non-NULL key we've already seen.  Remember it and commit
                     # so we know the old row exists when we add the new one
                     insert_and_commit()
-                    seen_ids[row_data['id']] = 1
+                    seen_ids.add(row_data['id'])
 
                 else:
                     # Non-NULL future id.  Save this and insert it later!
@@ -286,7 +289,7 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False, s
 
         # Attempt to add any spare rows we've collected
         for row_data, foreign_ids in deferred_rows:
-            if not all(_ in seen_ids for _ in foreign_ids):
+            if not foreign_ids.issubset(seen_ids):
                 # Could happen if row A refers to B which refers to C.
                 # This is ridiculous and doesn't happen in my data so far
                 raise ValueError("Too many levels of self-reference!  "
@@ -295,18 +298,35 @@ def load(session, tables=[], directory=None, drop_tables=False, verbose=False, s
             session.connection().execute(
                 insert_stmt.values(**row_data)
             )
-            seen_ids[row_data['id']] = 1
+            seen_ids.add(row_data['id'])
         session.commit()
 
         print_done()
 
+
+    print_start('Translations')
+    transl = translations.Translations(csv_directory=directory)
+
+    new_row_count = 0
+    for translation_class, rows in transl.get_load_data(langs):
+        table_obj = translation_class.__table__
+        if table_obj in table_objs:
+            insert_stmt = table_obj.insert()
+            session.connection().execute(insert_stmt, rows)
+            session.commit()
+            # We don't have a total, but at least show some increasing number
+            new_row_count += len(rows)
+            print_status(str(new_row_count))
+
+    print_done()
+
     # SQLite check
     if session.connection().dialect.name == 'sqlite':
         session.connection().execute("PRAGMA integrity_check")
 
 
 
-def dump(session, tables=[], directory=None, verbose=False):
+def dump(session, tables=[], directory=None, verbose=False, langs=['en']):
     """Dumps the contents of a database to a set of CSV files.  Probably not
     useful to anyone besides a developer.
 
@@ -322,11 +342,15 @@ def dump(session, tables=[], directory=None, verbose=False):
 
     `verbose`
         If set to True, status messages will be printed to stdout.
+
+    `langs`
+        List of identifiers of languages to dump unofficial texts for.
     """
 
     # First take care of verbosity
     print_start, print_status, print_done = _get_verbose_prints(verbose)
 
+    languages = dict((l.id, l) for l in session.query(pokedex.db.tables.Language))
 
     if not directory:
         directory = get_default_csv_dir()
@@ -342,25 +366,43 @@ def dump(session, tables=[], directory=None, verbose=False):
         writer = csv.writer(open("%s/%s.csv" % (directory, table_name), 'wb'),
                             lineterminator='\n')
         columns = [col.name for col in table.columns]
+
+        # For name tables, dump rows for official languages, as well as
+        # for those in `langs`.
+        # For other translation tables, only dump rows for languages in `langs`.
+        # For non-translation tables, dump all rows.
+        if 'local_language_id' in columns:
+            if any(col.info.get('official') for col in table.columns):
+                def include_row(row):
+                    return (languages[row.local_language_id].official or
+                            languages[row.local_language_id].identifier in langs)
+            else:
+                def include_row(row):
+                    return languages[row.local_language_id].identifier in langs
+        else:
+            def include_row(row):
+                return True
+
         writer.writerow(columns)
 
         primary_key = table.primary_key
         for row in session.query(table).order_by(*primary_key).all():
-            csvs = []
-            for col in columns:
-                # Convert Pythony values to something more universal
-                val = getattr(row, col)
-                if val == None:
-                    val = ''
-                elif val == True:
-                    val = '1'
-                elif val == False:
-                    val = '0'
-                else:
-                    val = unicode(val).encode('utf-8')
+            if include_row(row):
+                csvs = []
+                for col in columns:
+                    # Convert Pythony values to something more universal
+                    val = getattr(row, col)
+                    if val == None:
+                        val = ''
+                    elif val == True:
+                        val = '1'
+                    elif val == False:
+                        val = '0'
+                    else:
+                        val = unicode(val).encode('utf-8')
 
-                csvs.append(val)
+                    csvs.append(val)
 
-            writer.writerow(csvs)
+                writer.writerow(csvs)
 
         print_done()
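
Finally, a matching sketch for the dump side (again, the session helper, output directory, and language list are illustrative): for name tables, only rows in official languages plus the listed ones survive the include_row filter, while other translation tables keep only the listed languages.

    import pokedex.db
    from pokedex.db.load import dump

    # Assumed helper; substitute however you normally obtain a session.
    session = pokedex.db.connect('sqlite:///pokedex.sqlite')

    # Write CSVs containing every non-translation row, all official-language
    # names, and whatever unofficial text exists for English and French.
    dump(session,
         directory='/tmp/pokedex-csv',
         verbose=True,
         langs=['en', 'fr'])
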