from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.exc import *
from migrate import *
from migrate.changeset import *

import datetime
import logging
import sys

# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *

# Shortcut used by migration scripts for timestamp column defaults.
now = datetime.datetime.utcnow
# Module-level logging: migration output goes to stdout so it is visible
# when the migrate framework runs this script.
log = logging.getLogger( __name__ )
log.setLevel( logging.DEBUG )
handler = logging.StreamHandler( sys.stdout )
# Renamed from 'format' to avoid shadowing the builtin of the same name.
log_format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( log_format )
handler.setFormatter( formatter )
log.addHandler( handler )

# 'migrate_engine' is injected into this module's namespace by the
# sqlalchemy-migrate framework before the script is executed.
metadata = MetaData( migrate_engine )
db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
23 | def display_migration_details(): |
---|
24 | print "" |
---|
25 | print "========================================" |
---|
26 | print """This script creates a job_to_output_library_dataset table for allowing library |
---|
27 | uploads to run as regular jobs. To support this, a library_folder_id column is |
---|
28 | added to the job table, and library_folder/output_library_datasets relations |
---|
29 | are added to the Job object. An index is also added to the dataset.state |
---|
30 | column.""" |
---|
31 | print "========================================" |
---|
32 | |
---|
# Association table linking a job to the library dataset(s) it produces,
# so that library uploads can run as regular jobs.
JobToOutputLibraryDatasetAssociation_table = Table(
    "job_to_output_library_dataset", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
    Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
    Column( "name", String( 255 ) ) )
39 | def upgrade(): |
---|
40 | display_migration_details() |
---|
41 | # Load existing tables |
---|
42 | metadata.reflect() |
---|
43 | # Create the job_to_output_library_dataset table |
---|
44 | try: |
---|
45 | JobToOutputLibraryDatasetAssociation_table.create() |
---|
46 | except Exception, e: |
---|
47 | print "Creating job_to_output_library_dataset table failed: %s" % str( e ) |
---|
48 | log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) ) |
---|
49 | # Create the library_folder_id column |
---|
50 | try: |
---|
51 | Job_table = Table( "job", metadata, autoload=True ) |
---|
52 | except NoSuchTableError: |
---|
53 | Job_table = None |
---|
54 | log.debug( "Failed loading table job" ) |
---|
55 | if Job_table: |
---|
56 | try: |
---|
57 | col = Column( "library_folder_id", Integer, index=True ) |
---|
58 | col.create( Job_table ) |
---|
59 | assert col is Job_table.c.library_folder_id |
---|
60 | except Exception, e: |
---|
61 | log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) ) |
---|
62 | try: |
---|
63 | LibraryFolder_table = Table( "library_folder", metadata, autoload=True ) |
---|
64 | except NoSuchTableError: |
---|
65 | LibraryFolder_table = None |
---|
66 | log.debug( "Failed loading table library_folder" ) |
---|
67 | # Add 1 foreign key constraint to the job table |
---|
68 | if Job_table and LibraryFolder_table: |
---|
69 | try: |
---|
70 | cons = ForeignKeyConstraint( [Job_table.c.library_folder_id], |
---|
71 | [LibraryFolder_table.c.id], |
---|
72 | name='job_library_folder_id_fk' ) |
---|
73 | # Create the constraint |
---|
74 | cons.create() |
---|
75 | except Exception, e: |
---|
76 | log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) ) |
---|
77 | # Create the ix_dataset_state index |
---|
78 | try: |
---|
79 | Dataset_table = Table( "dataset", metadata, autoload=True ) |
---|
80 | except NoSuchTableError: |
---|
81 | Dataset_table = None |
---|
82 | log.debug( "Failed loading table dataset" ) |
---|
83 | i = Index( "ix_dataset_state", Dataset_table.c.state ) |
---|
84 | try: |
---|
85 | i.create() |
---|
86 | except Exception, e: |
---|
87 | print str(e) |
---|
88 | log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) ) |
---|
89 | |
---|
90 | def downgrade(): |
---|
91 | metadata.reflect() |
---|
92 | # Drop the library_folder_id column |
---|
93 | try: |
---|
94 | Job_table = Table( "job", metadata, autoload=True ) |
---|
95 | except NoSuchTableError: |
---|
96 | Job_table = None |
---|
97 | log.debug( "Failed loading table job" ) |
---|
98 | if Job_table: |
---|
99 | try: |
---|
100 | col = Job_table.c.library_folder_id |
---|
101 | col.drop() |
---|
102 | except Exception, e: |
---|
103 | log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) ) |
---|
104 | # Drop the job_to_output_library_dataset table |
---|
105 | try: |
---|
106 | JobToOutputLibraryDatasetAssociation_table.drop() |
---|
107 | except Exception, e: |
---|
108 | print str(e) |
---|
109 | log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) ) |
---|
110 | # Drop the ix_dataset_state index |
---|
111 | try: |
---|
112 | Dataset_table = Table( "dataset", metadata, autoload=True ) |
---|
113 | except NoSuchTableError: |
---|
114 | Dataset_table = None |
---|
115 | log.debug( "Failed loading table dataset" ) |
---|
116 | i = Index( "ix_dataset_state", Dataset_table.c.state ) |
---|
117 | try: |
---|
118 | i.drop() |
---|
119 | except Exception, e: |
---|
120 | print str(e) |
---|
121 | log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) ) |
---|