
Diff of /branches/WIP-pyshapelib-bramz/Thuban/Model/table.py


revision 1043 by bh, Mon May 26 19:27:15 2003 UTC, to revision 1599 by bh, Mon Aug 18 12:45:28 2003 UTC
# Line 132 (v1043) | Line 132 (v1599): class DBFTable(TitledObject, OldTableInt
     # work because a DBF file object buffers some data

     def __init__(self, filename):
-        self.filename = filename
-        title = os.path.basename(self.filename)
+        self.filename = os.path.abspath(filename)
+
+        # Omit the extension in the title as it's not really needed and
+        # it can be confusing because dbflib removes extensions and
+        # appends some variations of '.dbf' before it tries to open the
+        # file. So the title could be e.g. myshapefile.shp when the real
+        # filename is myshapefile.dbf
+        title = os.path.splitext(os.path.basename(self.filename))[0]
         TitledObject.__init__(self, title)

         self.dbf = dbflib.DBFFile(filename)

         # If true, self.dbf is open for writing.
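
For illustration only (the path below is hypothetical, not taken from the revision), the new title computation drops both the directory part and the extension, whereas the old code kept the extension:

    import os

    filename = os.path.abspath("/data/myshapefile.dbf")    # hypothetical path
    title = os.path.splitext(os.path.basename(filename))[0]
    # title == "myshapefile"; the old code produced "myshapefile.dbf"
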
# Line 406 (v1043) | Line 413 (v1599): class MemoryTable(TitledObject, OldTable
         self.data[record] = values


-def table_to_dbf(table, filename):
-    """Create the dbf file filename from the table"""
+
+def _find_dbf_column_names(names):
+    """Determine the column names to use in a DBF file
+
+    DBF files have a length limit of 10 characters on the column names,
+    so when writing an arbitrary Thuban table to a DBF file we may have
+    to rename some of the columns, making sure that they're unique in
+    the DBF file too.
+
+    Names that are already short enough stay the same. Longer names are
+    truncated to 10 characters; if the truncated name isn't unique, it
+    is truncated further and filled up with digits.
+
+    The parameter names should be a list of the column names. The return
+    value will be a dictionary mapping the names in the input list to
+    the names to use in the DBF file.
+    """
+    # mapping from the original names in table to the names in the DBF
+    # file
+    name_map = {}
+
+    # First, we keep all names that are already short enough
+    for i in range(len(names) - 1, -1, -1):
+        if len(names[i]) <= 10:
+            name_map[names[i]] = names[i]
+            del names[i]
+
+    # dict used as a set of all names already used as DBF column names
+    used = name_map.copy()
+
+    # Go through all longer names. If the name truncated to 10
+    # characters is not used already, we use that. Otherwise we truncate
+    # it more and append numbers until we get an unused name
+    for name in names:
+        truncated = name[:10]
+        num = 0; numstr = ""
+        #print "truncated", truncated, num
+        while truncated in used and len(numstr) < 10:
+            num += 1
+            numstr = str(num)
+            truncated = name[:10 - len(numstr)] + numstr
+            #print "truncated", truncated, num
+        if len(numstr) >= 10:
+            # This case should never happen in practice as tables with
+            # 10^10 columns seem very unlikely :)
+            raise ValueError("Can't find unique dbf column name")
+
+        name_map[name] = truncated
+        used[truncated] = 1
+
+    return name_map
+
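
A quick illustration of the renaming scheme (the column names here are hypothetical, purely to show the mapping):

    names = ["id", "population_density", "population_total"]
    mapping = _find_dbf_column_names(names)
    # mapping["id"]                 == "id"           (already short enough)
    # mapping["population_density"] == "population"   (truncated to 10 characters)
    # mapping["population_total"]   == "populatio1"   (truncated further, digit appended)
    # Note that the function removes the short names from the list it is
    # given, so names is left as ["population_density", "population_total"].
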
+def table_to_dbf(table, filename, rows = None):
+    """Create the dbf file filename from the table.
+
+    If rows is not None (the default) then it must be a list of row
+    indices to be saved to the file, otherwise all rows are saved.
+    """
+
     dbf = dbflib.create(filename)

     dbflib_fieldtypes = {FIELDTYPE_STRING: dbflib.FTString,
                          FIELDTYPE_INT: dbflib.FTInteger,
                          FIELDTYPE_DOUBLE: dbflib.FTDouble}

+
+    name_map = _find_dbf_column_names([col.name for col in table.Columns()])
+
     # Initialise the header. Distinguish between DBFTable and others.
     for col in table.Columns():
         width = table.Width(col.name)
# Line 421 (v1043) | Line 488 (v1599): def table_to_dbf(table, filename):
             prec = getattr(col, "prec", 12)
         else:
             prec = 0
-        dbf.add_field(col.name, dbflib_fieldtypes[col.type], width, prec)
+        dbf.add_field(name_map[col.name], dbflib_fieldtypes[col.type],
+                      width, prec)

-    for i in range(table.NumRows()):
-        record = table.ReadRowAsDict(i)
-        dbf.write_record(i, record)
+    if rows is None:
+        rows = range(table.NumRows())
+
+    recNum = 0
+    for i in rows:
+        record = {}
+        for key, value in table.ReadRowAsDict(i).items():
+            record[name_map[key]] = value
+        dbf.write_record(recNum, record)
+        recNum += 1
     dbf.close()

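
A minimal usage sketch of the new rows parameter (the table variable and output paths are hypothetical; 'table' stands for any Thuban table object with the Columns/NumRows/ReadRowAsDict interface used above):

    # Write all rows (rows defaults to None):
    table_to_dbf(table, "/tmp/all_rows.dbf")

    # Write only rows 0 and 2; the records are renumbered 0, 1, ... in the
    # output file and over-long column names are renamed via _find_dbf_column_names():
    table_to_dbf(table, "/tmp/some_rows.dbf", rows=[0, 2])
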
-def table_to_csv(table, filename):
-    """Export table to csv file."""
+def table_to_csv(table, filename, rows = None):
+    """Export table to csv file.
+
+    If rows is not None (the default) then it must be a list of row
+    indices to be saved to the file, otherwise all rows are saved.
+    """

     file = open(filename,"w")
     columns = table.Columns()
# Line 440 (v1043) | Line 519 (v1599): def table_to_csv(table, filename):
         header = header + "\n"
         file.write(header)

-        for i in range(table.NumRows()):
+        if rows is None:
+            rows = range(table.NumRows())
+
+        for i in rows:
             record = table.ReadRowAsDict(i)
             if len(record):
                 line = "%s" % record[columns[0].name]
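
table_to_csv takes the same kind of row list; for example (hypothetical path), a subset of rows could be exported with:

    table_to_csv(table, "/tmp/selection.csv", rows=[5, 7, 11])
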

Legend:
  lines prefixed with '-' were removed from v.1043
  lines prefixed with '+' were added in v.1599
