import operator, os
from datetime import datetime, timedelta
from galaxy.web.base.controller import *
from galaxy import model
from galaxy.model.orm import *
import logging
log = logging.getLogger( __name__ )

class System( BaseController ):
    @web.expose
    def index( self, trans, **kwd ):
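        """Render the system usage report, running the cleanup summary selected via the 'action' parameter if one was posted."""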
        params = util.Params( kwd )
        message = ''
        if params.userless_histories_days:
            userless_histories_days = params.userless_histories_days
        else:
            userless_histories_days = '60'
        if params.deleted_histories_days:
            deleted_histories_days = params.deleted_histories_days
        else:
            deleted_histories_days = '60'
        if params.deleted_datasets_days:
            deleted_datasets_days = params.deleted_datasets_days
        else:
            deleted_datasets_days = '60'
        file_path, disk_usage, datasets, file_size_str = self.disk_usage( trans, **kwd )
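        # Each report form on the page posts back with an 'action' parameter
        # naming the cleanup summary to run.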
        if 'action' in kwd:
            if kwd['action'] == "userless_histories":
                userless_histories_days, message = self.userless_histories( trans, **kwd )
            elif kwd['action'] == "deleted_histories":
                deleted_histories_days, message = self.deleted_histories( trans, **kwd )
            elif kwd['action'] == "deleted_datasets":
                deleted_datasets_days, message = self.deleted_datasets( trans, **kwd )
        return trans.fill_template( '/webapps/reports/system.mako',
                                    file_path=file_path,
                                    disk_usage=disk_usage,
                                    datasets=datasets,
                                    file_size_str=file_size_str,
                                    userless_histories_days=userless_histories_days,
                                    deleted_histories_days=deleted_histories_days,
                                    deleted_datasets_days=deleted_datasets_days,
                                    message=message )
    def userless_histories( self, trans, **kwd ):
        """The number of userless histories and associated datasets that have not been updated for the specified number of days."""
        params = util.Params( kwd )
        message = ''
        userless_histories_days = params.userless_histories_days
        if userless_histories_days:
            userless_histories_days = int( userless_histories_days )
            cutoff_time = datetime.utcnow() - timedelta( days=userless_histories_days )
            history_count = 0
            dataset_count = 0
            for history in trans.sa_session.query( model.History ) \
                                           .filter( and_( model.History.table.c.user_id == None,
                                                          model.History.table.c.deleted == True,
                                                          model.History.table.c.update_time < cutoff_time ) ):
                for dataset in history.datasets:
                    if not dataset.deleted:
                        dataset_count += 1
                history_count += 1
            message = "%d userless histories ( including a total of %d datasets ) have not been updated for at least %d days." % ( history_count, dataset_count, userless_histories_days )
        else:
            message = "Enter the number of days."
        return str( userless_histories_days ), message
    def deleted_histories( self, trans, **kwd ):
        """
        The number of histories that were deleted more than the specified number of days ago, but have not yet been purged.
        Also included is the number of datasets associated with the histories.
        """
        params = util.Params( kwd )
        message = ''
        deleted_histories_days = params.deleted_histories_days
        if deleted_histories_days:
            deleted_histories_days = int( deleted_histories_days )
            cutoff_time = datetime.utcnow() - timedelta( days=deleted_histories_days )
            history_count = 0
            dataset_count = 0
            disk_space = 0
            histories = trans.sa_session.query( model.History ) \
                                        .filter( and_( model.History.table.c.deleted == True,
                                                       model.History.table.c.purged == False,
                                                       model.History.table.c.update_time < cutoff_time ) ) \
                                        .options( eagerload( 'datasets' ) )
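            # eagerload pulls each history's datasets in the same query, so the
            # loop below does not issue a separate SELECT per history.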

            for history in histories:
                for hda in history.datasets:
                    if not hda.dataset.purged:
                        dataset_count += 1
                        try:
                            disk_space += hda.dataset.file_size
                        except TypeError:
                            # file_size may still be None if it was never set
                            pass
                history_count += 1
            message = "%d histories ( including a total of %d datasets ) were deleted more than %d days ago, but have not yet been purged. Disk space: " % ( history_count, dataset_count, deleted_histories_days ) + str( disk_space )
        else:
            message = "Enter the number of days."
        return str( deleted_histories_days ), message
    def deleted_datasets( self, trans, **kwd ):
        """The number of datasets that were deleted more than the specified number of days ago, but have not yet been purged."""
        params = util.Params( kwd )
        message = ''
        deleted_datasets_days = params.deleted_datasets_days
        if deleted_datasets_days:
            deleted_datasets_days = int( deleted_datasets_days )
            cutoff_time = datetime.utcnow() - timedelta( days=deleted_datasets_days )
            dataset_count = 0
            disk_space = 0
            for dataset in trans.sa_session.query( model.Dataset ) \
                                           .filter( and_( model.Dataset.table.c.deleted == True,
                                                          model.Dataset.table.c.purged == False,
                                                          model.Dataset.table.c.update_time < cutoff_time ) ):
                dataset_count += 1
                try:
                    disk_space += dataset.file_size
                except TypeError:
                    # file_size may still be None if it was never set
                    pass
            message = str( dataset_count ) + " datasets were deleted more than " + str( deleted_datasets_days ) + \
                      " days ago, but have not yet been purged, disk space: " + str( disk_space ) + "."
        else:
            message = "Enter the number of days."
        return str( deleted_datasets_days ), message
    @web.expose
    def dataset_info( self, trans, **kwd ):
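        """Show details for a single dataset along with every HDA and LDDA that shares its disk file."""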
        params = util.Params( kwd )
        message = ''
        dataset = trans.sa_session.query( model.Dataset ).get( trans.security.decode_id( kwd.get( 'id', '' ) ) )
        # Get all associated hdas and lddas that use the same disk file.
        associated_hdas = trans.sa_session.query( trans.model.HistoryDatasetAssociation ) \
                                          .filter( and_( trans.model.HistoryDatasetAssociation.deleted == False,
                                                         trans.model.HistoryDatasetAssociation.dataset_id == dataset.id ) ) \
                                          .all()
        associated_lddas = trans.sa_session.query( trans.model.LibraryDatasetDatasetAssociation ) \
                                           .filter( and_( trans.model.LibraryDatasetDatasetAssociation.deleted == False,
                                                          trans.model.LibraryDatasetDatasetAssociation.dataset_id == dataset.id ) ) \
                                           .all()
        return trans.fill_template( '/webapps/reports/dataset_info.mako',
                                    dataset=dataset,
                                    associated_hdas=associated_hdas,
                                    associated_lddas=associated_lddas,
                                    message=message )
    def get_disk_usage( self, file_path ):
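        """Run `df -h` on file_path and return ( file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount )."""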
        df_cmd = 'df -h ' + file_path
        is_sym_link = os.path.islink( file_path )
        file_system = disk_size = disk_used = disk_avail = disk_cap_pct = mount = None
        df_file = os.popen( df_cmd )
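        # For illustration only, `df -h` output normally looks something like
        # ( the values here are made up ):
        #   Filesystem            Size  Used Avail Use% Mounted on
        #   /dev/sda1             917G  523G  348G  61% /data
        # When file_path is a symlink to a remote mount, the device name can be
        # long enough that df wraps the remaining columns onto a second line,
        # which is what the is_sym_link branch below tries to handle.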
        while True:
            df_line = df_file.readline()
            df_line = df_line.strip()
            if df_line:
                df_line = df_line.lower()
                if 'filesystem' in df_line or 'proc' in df_line:
                    continue
                elif is_sym_link:
                    if ':' in df_line and '/' in df_line:
                        mount = df_line
                    else:
                        try:
                            disk_size, disk_used, disk_avail, disk_cap_pct, file_system = df_line.split()
                            break
                        except ValueError:
                            # not the 5-column line we are looking for
                            pass
                else:
                    try:
                        file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount = df_line.split()
                        break
                    except ValueError:
                        # not the 6-column line we are looking for
                        pass
            else:
                break # EOF
        df_file.close()
        return ( file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount )
    @web.expose
    def disk_usage( self, trans, **kwd ):
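        """Return the configured file_path, its `df` figures, and all unpurged datasets larger than min_file_size."""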
        file_path = trans.app.config.file_path
        disk_usage = self.get_disk_usage( file_path )
        min_file_size = 2**32 # 4 Gb
        file_size_str = nice_size( min_file_size )
        datasets = trans.sa_session.query( model.Dataset ) \
                                   .filter( and_( model.Dataset.table.c.purged == False,
                                                  model.Dataset.table.c.file_size > min_file_size ) ) \
                                   .order_by( desc( model.Dataset.table.c.file_size ) )
        return file_path, disk_usage, datasets, file_size_str

def nice_size( size ):
    """Returns a readably formatted string with the size"""
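    # A few sample values, as computed by the loop below:
    #   nice_size( 512 )    -> '512.0 bytes'
    #   nice_size( 2**20 )  -> '1.0 Mb'
    #   nice_size( 2**32 )  -> '4.0 Gb'
    #   nice_size( 'abc' )  -> '??? bytes'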
    words = [ 'bytes', 'Kb', 'Mb', 'Gb' ]
    try:
        size = float( size )
    except ( TypeError, ValueError ):
        return '??? bytes'
    for ind, word in enumerate( words ):
        step = 1024 ** ( ind + 1 )
        if step > size:
            size = size / float( 1024 ** ind )
            out = "%.1f %s" % ( size, word )
            return out
    return '??? bytes'