root/galaxy-central/tools/new_operations/operation_filter.py @ 2

Revision 2, 4.0 KB (committer: hatakeyama, 14 years ago)

import galaxy-central

# runs after the job (and after the default post-filter)
import os
from galaxy import eggs
from galaxy import jobs
from galaxy.tools.parameters import DataToolParameter
# Older py compatibility
try:
    set()
except:
    from sets import Set as set

#def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
#    """Sets the name of the data"""
#    dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] )
#    if len(dbkeys) != 1:
#        raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'

def validate_input( trans, error_map, param_values, page_param_map ):
    dbkeys = set()
    data_param_names = set()
    data_params = 0
    for name, param in page_param_map.iteritems():
        if isinstance( param, DataToolParameter ):
            # for each dataset parameter
            if param_values.get(name, None) != None:
                dbkeys.add( param_values[name].dbkey )
                data_params += 1
                # check meta data
                try:
                    param = param_values[name]
                    if isinstance( param.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                        # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                        pass
                    else: # Validate interval datatype.
                        startCol = int( param.metadata.startCol )
                        endCol = int( param.metadata.endCol )
                        chromCol = int( param.metadata.chromCol )
                        if param.metadata.strandCol is not None:
                            strandCol = int( param.metadata.strandCol )
                        else:
                            strandCol = 0
                except:
                    error_msg = "The attributes of this dataset are not properly set. " + \
                    "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                    error_map[name] = error_msg
            data_param_names.add( name )
    if len( dbkeys ) > 1:
        for name in data_param_names:
            error_map[name] = "All datasets must belong to same genomic build, " \
                "this dataset is linked to build '%s'" % param_values[name].dbkey
    if data_params != len(data_param_names):
        for name in data_param_names:
            error_map[name] = "A dataset of the appropriate type is required"

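# -----------------------------------------------------------------------------
# Illustration: a simplified, standalone sketch of the build-consistency rule
# enforced by validate_input above (collect every input's dbkey and flag all
# dataset parameters when more than one genome build is present).  The names
# below (_FakeDataset, _check_same_build) are hypothetical stand-ins, not part
# of the Galaxy API; real calls go through validate_input with live
# trans/parameter objects.
class _FakeDataset( object ):
    """Minimal stand-in exposing only the dbkey attribute used by the check."""
    def __init__( self, dbkey ):
        self.dbkey = dbkey

def _check_same_build( datasets ):
    """datasets: dict of parameter name -> dataset-like object with .dbkey."""
    error_map = {}
    dbkeys = set( [ d.dbkey for d in datasets.values() ] )
    if len( dbkeys ) > 1:
        for name, d in datasets.items():
            error_map[name] = "All datasets must belong to the same genomic build, " \
                "this dataset is linked to build '%s'" % d.dbkey
    return error_map

# Example: two inputs from different builds are both flagged.
# _check_same_build( dict( input1=_FakeDataset( 'hg18' ), input2=_FakeDataset( 'mm9' ) ) )
# -> both 'input1' and 'input2' receive an error message naming their build.
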
# Commented out by INS, 5/30/2007.  What is the PURPOSE of this?
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Verify the output data after each run"""
    items = out_data.items()

    for name, data in items:
        try:
            if stderr and len( stderr ) > 0:
                raise Exception( stderr )

        except Exception, exc:
            data.blurb = jobs.JOB_ERROR
            data.state = jobs.JOB_ERROR

## def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
##     pass

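# -----------------------------------------------------------------------------
# Illustration: the post-job hook above marks every output as errored whenever
# the tool wrote anything at all to stderr, warnings included.  The sketch
# below reproduces that pattern with a hypothetical stand-in for an output
# dataset (_FakeOutput is not a Galaxy class, and the literal 'error' stands in
# for jobs.JOB_ERROR).
class _FakeOutput( object ):
    def __init__( self ):
        self.state = 'ok'
        self.blurb = ''

def _flag_outputs_on_stderr( out_data, stderr ):
    """out_data: dict of output name -> dataset-like object; stderr: captured text."""
    for name, data in out_data.items():
        if stderr and len( stderr ) > 0:
            data.blurb = 'error'
            data.state = 'error'

# Example: any stderr text, even a warning, flags the dataset.
# outputs = dict( output1=_FakeOutput() )
# _flag_outputs_on_stderr( outputs, 'WARNING: chrM not found\n' )
# outputs['output1'].state  ->  'error'
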
def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    exec_after_process(
        app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)

    # strip strand column if clusters were merged
    items = out_data.items()
    for name, data in items:
        if param_dict['returntype'] == True:
            data.metadata.chromCol = 1
            data.metadata.startCol = 2
            data.metadata.endCol = 3
        # merge always clobbers strand
        data.metadata.strandCol = None


def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    exec_after_process(
        app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)

    # strip strand column if clusters were merged
    if param_dict["returntype"] == '1':
        items = out_data.items()
        for name, data in items:
            data.metadata.strandCol = None
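
# -----------------------------------------------------------------------------
# Usage sketch: in the Galaxy distribution, hook modules like this one are
# referenced from a tool's XML wrapper through a <code file="operation_filter.py"/>
# element, and Galaxy resolves hooks such as validate_input and the
# exec_after_* functions by name; the exact hook mapping used by the merge and
# cluster tools is not shown here.  The snippet below, using a hypothetical
# _FakeMetadata stand-in rather than real Galaxy metadata, shows the net effect
# of exec_after_merge on an interval output: positional columns are reset when
# 'returntype' is set, and the strand column is always cleared.
class _FakeMetadata( object ):
    def __init__( self ):
        self.chromCol, self.startCol, self.endCol, self.strandCol = 1, 2, 3, 6

def _apply_merge_metadata( metadata, returntype ):
    if returntype == True:
        metadata.chromCol = 1
        metadata.startCol = 2
        metadata.endCol = 3
    # merge always clobbers strand
    metadata.strandCol = None
    return metadata

# Example: after a merge the strand column is gone regardless of 'returntype'.
# _apply_merge_metadata( _FakeMetadata(), returntype=False ).strandCol  ->  None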