Mercurial > p > roundup > code
comparison roundup/admin.py @ 6487:a101541fc494
Handle ResourceWarning in admin.py do_export
ResourceWarning: unclosed file <_io.TextIOWrapper
name='_test_export/priority.csv' mode='r' encoding='UTF-8'>
| author | John Rouillard <rouilj@ieee.org> |
|---|---|
| date | Mon, 06 Sep 2021 14:41:23 -0400 |
| parents | ada1edcc9132 |
| children | 087cae2fbcea |
comparison legend: equal · deleted · inserted · replaced
| 6486:8c371af7e785 | 6487:a101541fc494 |
|---|---|
| 1308 | 1308 |
| 1309 if not export_files and hasattr(cl, 'export_files'): | 1309 if not export_files and hasattr(cl, 'export_files'): |
| 1310 sys.stdout.write('Exporting %s WITHOUT the files\r\n' % | 1310 sys.stdout.write('Exporting %s WITHOUT the files\r\n' % |
| 1311 classname) | 1311 classname) |
| 1312 | 1312 |
| 1313 f = open(os.path.join(dir, classname+'.csv'), 'w') | 1313 with open(os.path.join(dir, classname+'.csv'), 'w') as f: |
| 1314 writer = csv.writer(f, colon_separated) | 1314 writer = csv.writer(f, colon_separated) |
| 1315 | 1315 |
| 1316 properties = cl.getprops() | 1316 properties = cl.getprops() |
| 1317 propnames = cl.export_propnames() | 1317 propnames = cl.export_propnames() |
| 1318 fields = propnames[:] | 1318 fields = propnames[:] |
| 1319 fields.append('is retired') | 1319 fields.append('is retired') |
| 1320 writer.writerow(fields) | 1320 writer.writerow(fields) |
| 1321 | 1321 |
| 1322 # If a node has a key, sort all nodes by key | 1322 # If a node has a key, sort all nodes by key |
| 1323 # with retired nodes first. Retired nodes | 1323 # with retired nodes first. Retired nodes |
| 1324 # must occur before a non-retired node with | 1324 # must occur before a non-retired node with |
| 1325 # the same key. Otherwise you get an | 1325 # the same key. Otherwise you get an |
| 1326 # IntegrityError: UNIQUE constraint failed: | 1326 # IntegrityError: UNIQUE constraint failed: |
| 1327 # _class.__retired__, _<class>._<keyname> | 1327 # _class.__retired__, _<class>._<keyname> |
| 1328 # on imports to rdbms. | 1328 # on imports to rdbms. |
| 1329 all_nodes = cl.getnodeids() | 1329 all_nodes = cl.getnodeids() |
| 1330 | 1330 |
| 1331 classkey = cl.getkey() | 1331 classkey = cl.getkey() |
| 1332 if classkey: # False sorts before True, so negate is_retired | 1332 if classkey: # False sorts before True, so negate is_retired |
| 1333 keysort = lambda i: (cl.get(i, classkey), | 1333 keysort = lambda i: (cl.get(i, classkey), |
| 1334 not cl.is_retired(i)) | 1334 not cl.is_retired(i)) |
| 1335 all_nodes.sort(key=keysort) | 1335 all_nodes.sort(key=keysort) |
| 1336 # if there is no classkey no need to sort | 1336 # if there is no classkey no need to sort |
| 1337 | 1337 |
| 1338 for nodeid in all_nodes: | 1338 for nodeid in all_nodes: |
| 1339 if self.verbose: | |
| 1340 sys.stdout.write('\rExporting %s - %s' % | |
| 1341 (classname, nodeid)) | |
| 1342 sys.stdout.flush() | |
| 1343 node = cl.getnode(nodeid) | |
| 1344 exp = cl.export_list(propnames, nodeid) | |
| 1345 lensum = sum([len(repr_export(node[p])) for p in propnames]) | |
| 1346 # for a safe upper bound of field length we add | |
| 1347 # difference between CSV len and sum of all field lengths | |
| 1348 d = sum([len(x) for x in exp]) - lensum | |
| 1349 if not d > 0: | |
| 1350 raise AssertionError("Bad assertion d > 0") | |
| 1351 for p in propnames: | |
| 1352 ll = len(repr_export(node[p])) + d | |
| 1353 if ll > max_len: | |
| 1354 max_len = ll | |
| 1355 writer.writerow(exp) | |
| 1356 if export_files and hasattr(cl, 'export_files'): | |
| 1357 cl.export_files(dir, nodeid) | |
| 1358 | |
| 1359 # export the journals | |
| 1360 with open(os.path.join(dir, classname+'-journals.csv'), 'w') as jf: | |
| 1339 if self.verbose: | 1361 if self.verbose: |
| 1340 sys.stdout.write('\rExporting %s - %s' % | 1362 sys.stdout.write("\nExporting Journal for %s\n" % classname) |
| 1341 (classname, nodeid)) | |
| 1342 sys.stdout.flush() | 1363 sys.stdout.flush() |
| 1343 node = cl.getnode(nodeid) | 1364 journals = csv.writer(jf, colon_separated) |
| 1344 exp = cl.export_list(propnames, nodeid) | 1365 for row in cl.export_journals(): |
| 1345 lensum = sum([len(repr_export(node[p])) for p in propnames]) | 1366 journals.writerow(row) |
| 1346 # for a safe upper bound of field length we add | |
| 1347 # difference between CSV len and sum of all field lengths | |
| 1348 d = sum([len(x) for x in exp]) - lensum | |
| 1349 if not d > 0: | |
| 1350 raise AssertionError("Bad assertion d > 0") | |
| 1351 for p in propnames: | |
| 1352 ll = len(repr_export(node[p])) + d | |
| 1353 if ll > max_len: | |
| 1354 max_len = ll | |
| 1355 writer.writerow(exp) | |
| 1356 if export_files and hasattr(cl, 'export_files'): | |
| 1357 cl.export_files(dir, nodeid) | |
| 1358 | |
| 1359 # close this file | |
| 1360 f.close() | |
| 1361 | |
| 1362 # export the journals | |
| 1363 jf = open(os.path.join(dir, classname+'-journals.csv'), 'w') | |
| 1364 if self.verbose: | |
| 1365 sys.stdout.write("\nExporting Journal for %s\n" % classname) | |
| 1366 sys.stdout.flush() | |
| 1367 journals = csv.writer(jf, colon_separated) | |
| 1368 for row in cl.export_journals(): | |
| 1369 journals.writerow(row) | |
| 1370 jf.close() | |
| 1371 if max_len > self.db.config.CSV_FIELD_SIZE: | 1367 if max_len > self.db.config.CSV_FIELD_SIZE: |
| 1372 print("Warning: config csv_field_size should be at least %s" % | 1368 print("Warning: config csv_field_size should be at least %s" % |
| 1373 max_len, file=sys.stderr) | 1369 max_len, file=sys.stderr) |
| 1370 jf.close() | |
| 1374 return 0 | 1371 return 0 |
| 1375 | 1372 |
| 1376 def do_exporttables(self, args): | 1373 def do_exporttables(self, args): |
| 1377 ''"""Usage: exporttables [[-]class[,class]] export_dir | 1374 ''"""Usage: exporttables [[-]class[,class]] export_dir |
| 1378 Export the database to colon-separated-value files, excluding the | 1375 Export the database to colon-separated-value files, excluding the |
