Mercurial > p > roundup > code
comparison roundup/admin.py @ 4255:88af08f8666f
New config option csv_field_size:
Python's csv module (which is used for export/import) has a new field
size limit starting with Python 2.5. We now issue a warning during
export if the limit is too small and use the csv_field_size
configuration during import to set the limit for the csv module.
| author | Ralf Schlatterbeck <schlatterbeck@users.sourceforge.net> |
|---|---|
| date | Tue, 29 Sep 2009 07:27:17 +0000 |
| parents | eddb82d0964c |
| children | ab4563e97d22 |
comparison
equal
deleted
inserted
replaced
| 4254:8d3582271a99 | 4255:88af08f8666f |
|---|---|
| 1097 | 1097 |
| 1098 # make sure target dir exists | 1098 # make sure target dir exists |
| 1099 if not os.path.exists(dir): | 1099 if not os.path.exists(dir): |
| 1100 os.makedirs(dir) | 1100 os.makedirs(dir) |
| 1101 | 1101 |
| 1102 # maximum csv field length exceeding configured size? | |
| 1103 max_len = self.db.config.CSV_FIELD_SIZE | |
| 1104 | |
| 1102 # do all the classes specified | 1105 # do all the classes specified |
| 1103 for classname in classes: | 1106 for classname in classes: |
| 1104 cl = self.get_class(classname) | 1107 cl = self.get_class(classname) |
| 1105 | 1108 |
| 1106 if not export_files and hasattr(cl, 'export_files'): | 1109 if not export_files and hasattr(cl, 'export_files'): |
| 1119 # all nodes for this class | 1122 # all nodes for this class |
| 1120 for nodeid in cl.getnodeids(): | 1123 for nodeid in cl.getnodeids(): |
| 1121 if self.verbose: | 1124 if self.verbose: |
| 1122 sys.stdout.write('\rExporting %s - %s'%(classname, nodeid)) | 1125 sys.stdout.write('\rExporting %s - %s'%(classname, nodeid)) |
| 1123 sys.stdout.flush() | 1126 sys.stdout.flush() |
| 1124 writer.writerow(cl.export_list(propnames, nodeid)) | 1127 node = cl.getnode(nodeid) |
| 1128 exp = cl.export_list(propnames, nodeid) | |
| 1129 lensum = sum (len (repr(node[p])) for p in propnames) | |
| 1130 # for a safe upper bound of field length we add | |
| 1131 # difference between CSV len and sum of all field lengths | |
| 1132 d = sum (len(x) for x in exp) - lensum | |
| 1133 assert (d > 0) | |
| 1134 for p in propnames: | |
| 1135 ll = len(repr(node[p])) + d | |
| 1136 if ll > max_len: | |
| 1137 max_len = ll | |
| 1138 writer.writerow(exp) | |
| 1125 if export_files and hasattr(cl, 'export_files'): | 1139 if export_files and hasattr(cl, 'export_files'): |
| 1126 cl.export_files(dir, nodeid) | 1140 cl.export_files(dir, nodeid) |
| 1127 | 1141 |
| 1128 # close this file | 1142 # close this file |
| 1129 f.close() | 1143 f.close() |
| 1134 sys.stdout.write("\nExporting Journal for %s\n" % classname) | 1148 sys.stdout.write("\nExporting Journal for %s\n" % classname) |
| 1135 sys.stdout.flush() | 1149 sys.stdout.flush() |
| 1136 journals = csv.writer(jf, colon_separated) | 1150 journals = csv.writer(jf, colon_separated) |
| 1137 map(journals.writerow, cl.export_journals()) | 1151 map(journals.writerow, cl.export_journals()) |
| 1138 jf.close() | 1152 jf.close() |
| 1153 if max_len > self.db.config.CSV_FIELD_SIZE: | |
| 1154 print >> sys.stderr, \ | |
| 1155 "Warning: config csv_field_size should be at least %s"%max_len | |
| 1139 return 0 | 1156 return 0 |
| 1140 | 1157 |
| 1141 def do_exporttables(self, args): | 1158 def do_exporttables(self, args): |
| 1142 ''"""Usage: exporttables [[-]class[,class]] export_dir | 1159 ''"""Usage: exporttables [[-]class[,class]] export_dir |
| 1143 Export the database to colon-separated-value files, excluding the | 1160 Export the database to colon-separated-value files, excluding the |
| 1174 database (or, tediously, retire all the old data.) | 1191 database (or, tediously, retire all the old data.) |
| 1175 """ | 1192 """ |
| 1176 if len(args) < 1: | 1193 if len(args) < 1: |
| 1177 raise UsageError, _('Not enough arguments supplied') | 1194 raise UsageError, _('Not enough arguments supplied') |
| 1178 from roundup import hyperdb | 1195 from roundup import hyperdb |
| 1196 | |
| 1197 if hasattr (csv, 'field_size_limit'): | |
| 1198 csv.field_size_limit(self.db.config.CSV_FIELD_SIZE) | |
| 1179 | 1199 |
| 1180 # directory to import from | 1200 # directory to import from |
| 1181 dir = args[0] | 1201 dir = args[0] |
| 1182 | 1202 |
| 1183 class colon_separated(csv.excel): | 1203 class colon_separated(csv.excel): |
| 1210 # do the import and figure the current highest nodeid | 1230 # do the import and figure the current highest nodeid |
| 1211 nodeid = cl.import_list(file_props, r) | 1231 nodeid = cl.import_list(file_props, r) |
| 1212 if hasattr(cl, 'import_files'): | 1232 if hasattr(cl, 'import_files'): |
| 1213 cl.import_files(dir, nodeid) | 1233 cl.import_files(dir, nodeid) |
| 1214 maxid = max(maxid, int(nodeid)) | 1234 maxid = max(maxid, int(nodeid)) |
| 1215 print | 1235 print >> sys.stdout |
| 1216 f.close() | 1236 f.close() |
| 1217 | 1237 |
| 1218 # import the journals | 1238 # import the journals |
| 1219 f = open(os.path.join(args[0], classname + '-journals.csv'), 'r') | 1239 f = open(os.path.join(args[0], classname + '-journals.csv'), 'r') |
| 1220 reader = csv.reader(f, colon_separated) | 1240 reader = csv.reader(f, colon_separated) |
| 1221 cl.import_journals(reader) | 1241 cl.import_journals(reader) |
| 1222 f.close() | 1242 f.close() |
| 1223 | 1243 |
| 1224 # set the id counter | 1244 # set the id counter |
| 1225 print 'setting', classname, maxid+1 | 1245 print >> sys.stdout, 'setting', classname, maxid+1 |
| 1226 self.db.setid(classname, str(maxid+1)) | 1246 self.db.setid(classname, str(maxid+1)) |
| 1227 | 1247 |
| 1228 self.db_uncommitted = True | 1248 self.db_uncommitted = True |
| 1229 return 0 | 1249 return 0 |
| 1230 | 1250 |
