/[thuban]/branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c

Diff of /branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c


Removed: trunk/thuban/libraries/pyshapelib/dbflib.i, revision 2453 by bh, Mon Dec 13 17:57:04 2004 UTC
Added: branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c, revision 2751 by bramz, Wed Mar 28 23:30:15 2007 UTC

The SWIG interface file dbflib.i is dropped entirely and replaced by the hand-written Python extension module dbflibmodule.c shown below; the wrapped dbflib API (the DBFFile type with its read/write methods plus the module-level open() and create() functions) is preserved.
#include "pyshapelib_common.h"


/* --- DBFFile ------------------------------------------------------------------------------------------------------- */

typedef struct {
	PyObject_HEAD
	DBFHandle handle;
} DBFFileObject;

/* allocator */
static PyObject* dbffile_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
{
	DBFFileObject* self;
	self = (DBFFileObject*) type->tp_alloc(type, 0);
	self->handle = NULL;
	return (PyObject*) self;
}


/* deallocator */
static void dbffile_dealloc(DBFFileObject* self)
{
	/* guard: DBFClose(NULL) is not safe with every shapelib version */
	if (self->handle) DBFClose(self->handle);
	self->handle = NULL;
	self->ob_type->tp_free((PyObject*)self);
}

/* constructor */
static int dbffile_init(DBFFileObject* self, PyObject* args, PyObject* kwds)
{
	char* file = NULL;
	char* mode = "rb";
	static char *kwlist[] = {"name", "mode", NULL};

	/* close any handle left over from an earlier __init__ call */
	if (self->handle) DBFClose(self->handle);
	self->handle = NULL;

#if defined(SHPAPI_HAS_WIDE) && defined(Py_WIN_WIDE_FILENAMES)
	if (GetVersion() < 0x80000000) {    /* On NT, so wide API available */
		PyObject *wfile;
		if (PyArg_ParseTupleAndKeywords(args, kwds, "U|s:DBFFile", kwlist, &wfile, &mode))
		{
			PyObject *wmode = PyUnicode_DecodeASCII(mode, strlen(mode), NULL);
			if (!wmode) return -1;
			self->handle = DBFOpenW(PyUnicode_AS_UNICODE(wfile), PyUnicode_AS_UNICODE(wmode));
			Py_DECREF(wmode);
			if (!self->handle)
			{
				PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, wfile);
				return -1;
			}
		}
		else
		{
			/* Drop the argument parsing error as narrow
			   strings are also valid. */
			PyErr_Clear();
		}
	}
#endif

	if (!self->handle)
	{
		if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|s:DBFFile", kwlist,
			Py_FileSystemDefaultEncoding, &file, &mode)) return -1;
		self->handle = DBFOpen(file, mode);

		if (!self->handle)
		{
			PyErr_SetFromErrnoWithFilename(PyExc_IOError, file);
			PyMem_Free(file);
			return -1;
		}

		PyMem_Free(file);
	}

	return 0;
}

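From Python, the type above is exposed as dbflib.DBFFile (see initdbflib below). A minimal usage sketch, assuming the module is built and an existing file named example.dbf (the file name is illustrative, not taken from this diff):

    import dbflib

    dbf = dbflib.DBFFile("example.dbf")          # mode defaults to "rb"
    print dbf.record_count(), dbf.field_count()
    dbf.close()                                  # releases the underlying DBFHandle
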
static PyObject* dbffile_close(DBFFileObject* self)
{
	if (self->handle) DBFClose(self->handle);
	self->handle = NULL;
	Py_RETURN_NONE;
}

static PyObject* dbffile_field_count(DBFFileObject* self)
{
	return PyInt_FromLong((long)DBFGetFieldCount(self->handle));
}

static PyObject* dbffile_record_count(DBFFileObject* self)
{
	return PyInt_FromLong((long)DBFGetRecordCount(self->handle));
}

static PyObject* dbffile_field_info(DBFFileObject* self, PyObject* args)
{
	char field_name[12];
	int field, width = 0, decimals = 0, field_type;

	if (!PyArg_ParseTuple(args, "i:field_info", &field)) return NULL;

	field_name[0] = '\0';
	field_type = DBFGetFieldInfo(self->handle, field, field_name, &width, &decimals);

	return Py_BuildValue("isii", field_type, field_name, width, decimals);
}

static PyObject* dbffile_add_field(DBFFileObject* self, PyObject* args)
{
	char* name;
	int type, width, decimals;
	int field;

	if (!PyArg_ParseTuple(args, "siii:add_field", &name, &type, &width, &decimals)) return NULL;

	field = DBFAddField(self->handle, name, (DBFFieldType)type, width, decimals);

	if (field < 0)
	{
		PyErr_SetString(PyExc_ValueError, "Failed to add field due to inappropriate field definition");
		return NULL;
	}
	return PyInt_FromLong((long)field);
}

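As the "siii" format above shows, add_field takes the field name first, then the type constant, the width, and the number of decimals, and returns the new field index; field_info returns the same data as a (type, name, width, decimals) tuple. A sketch with illustrative file and field names:

    dbf = dbflib.create("table.dbf")                     # hypothetical output file
    i = dbf.add_field("NAME", dbflib.FTString, 20, 0)    # name, type, width, decimals
    dbf.add_field("AREA", dbflib.FTDouble, 16, 3)
    print dbf.field_info(i)                              # -> (type, 'NAME', 20, 0)
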
/* Read one attribute from the dbf handle and return it as a new python object
 *
 * If an error occurs, set the appropriate Python exception and return
 * NULL.
 *
 * Assume that the values of the record and field arguments are valid.
 * The name argument will be passed to DBFGetFieldInfo as is and should
 * thus be either NULL or a pointer to an array of at least 12 chars
 */
static PyObject* do_read_attribute(DBFHandle handle, int record, int field, char * name)
{
	int type, width;
	const char* temp;
	type = DBFGetFieldInfo(handle, field, name, &width, NULL);

	/* For strings NULL and the empty string are indistinguishable
	 * in DBF files. We prefer empty strings instead for backwards
	 * compatibility reasons because older wrapper versions returned
	 * NULL strings as empty strings.
	 */
	if (type != FTString && DBFIsAttributeNULL(handle, record, field))
	{
		Py_RETURN_NONE;
	}
	else
	{
		switch (type)
		{
		case FTString:
			temp = DBFReadStringAttribute(handle, record, field);
			if (temp) return PyString_FromString(temp);
			break;	/* NULL string: report the IOError below */

		case FTInteger:
			return PyInt_FromLong((long)DBFReadIntegerAttribute(handle, record, field));

		case FTDouble:
			return PyFloat_FromDouble(DBFReadDoubleAttribute(handle, record, field));

		case FTLogical:
			temp = DBFReadLogicalAttribute(handle, record, field);
			if (temp)
			{
				switch (temp[0])
				{
				case 'F':
				case 'N':
					Py_RETURN_FALSE;
				case 'T':
				case 'Y':
					Py_RETURN_TRUE;
				}
			}
			break;

		default:
			PyErr_Format(PyExc_TypeError, "Invalid field data type %d", type);
			return NULL;
		}
	}

	PyErr_Format(PyExc_IOError, "Can't read value for row %d column %d", record, field);
	return NULL;
}

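The comment above has a practical consequence on the Python side: NULL values of non-string fields come back as None, string fields return "" for both NULL and empty, and logical fields map T/Y and F/N to True/False. For example (record and field indices are illustrative):

    value = dbf.read_attribute(3, 2)     # hypothetical record and field indices
    if value is None:
        print "NULL (non-string field)"
    elif value == "":
        print "empty or NULL string field"
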
/* the read_attribute method. Return the value of the given record and
 * field as a python object of the appropriate type.
 */
static PyObject* dbffile_read_attribute(DBFFileObject* self, PyObject* args)
{
	int record, field;

	if (!PyArg_ParseTuple(args, "ii:read_attribute", &record, &field)) return NULL;

	if (record < 0 || record >= DBFGetRecordCount(self->handle))
	{
		PyErr_Format(PyExc_ValueError,
				"record index %d out of bounds (record count: %d)",
				record, DBFGetRecordCount(self->handle));
		return NULL;
	}

	if (field < 0 || field >= DBFGetFieldCount(self->handle))
	{
		PyErr_Format(PyExc_ValueError,
				"field index %d out of bounds (field count: %d)",
				field, DBFGetFieldCount(self->handle));
		return NULL;
	}

	return do_read_attribute(self->handle, record, field, NULL);
}

/* the read_record method. Return the requested record as a dictionary
 * whose keys are the names of the fields and whose values are Python
 * objects of the appropriate type.
 */
static PyObject* dbffile_read_record(DBFFileObject* self, PyObject* args)
{
	int record;
	int num_fields;
	int i;
	char name[12];
	PyObject *dict;
	PyObject *value = NULL;

	if (!PyArg_ParseTuple(args, "i:read_record", &record)) return NULL;

	if (record < 0 || record >= DBFGetRecordCount(self->handle))
	{
		PyErr_Format(PyExc_ValueError,
			"record index %d out of bounds (record count: %d)",
			record, DBFGetRecordCount(self->handle));
		return NULL;
	}

	dict = PyDict_New();
	if (!dict) return NULL;

	num_fields = DBFGetFieldCount(self->handle);
	for (i = 0; i < num_fields; i++)
	{
		value = do_read_attribute(self->handle, record, i, name);
		if (!value || PyDict_SetItemString(dict, name, value) < 0) goto fail;
		Py_DECREF(value);
		value = NULL;
	}

	return dict;

fail:
	Py_XDECREF(value);
	Py_DECREF(dict);
	return NULL;
}

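Together with read_attribute above, the read path looks roughly like this from Python (file name illustrative):

    dbf = dbflib.open("example.dbf")
    for i in range(dbf.record_count()):
        print dbf.read_record(i)         # dict mapping field names to values
    print dbf.read_attribute(0, 0)       # a single value: record 0, field 0
    dbf.close()
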
/* write a single field of a record. */
static int do_write_field(DBFHandle handle, int record, int field, int type, PyObject* value)
{
	char * string_value;
	int int_value;
	double double_value;
	int logical_value;

	if (value == Py_None)
	{
		if (DBFWriteNULLAttribute(handle, record, field)) return 1;
	}
	else
	{
		switch (type)
		{
		case FTString:
			string_value = PyString_AsString(value);
			if (!string_value) return 0;
			if (DBFWriteStringAttribute(handle, record, field, string_value)) return 1;
			break;

		case FTInteger:
			int_value = PyInt_AsLong(value);
			if (int_value == -1 && PyErr_Occurred()) return 0;
			if (DBFWriteIntegerAttribute(handle, record, field, int_value)) return 1;
			break;

		case FTDouble:
			double_value = PyFloat_AsDouble(value);
			if (double_value == -1 && PyErr_Occurred()) return 0;
			if (DBFWriteDoubleAttribute(handle, record, field, double_value)) return 1;
			break;

		case FTLogical:
			logical_value = PyObject_IsTrue(value);
			if (logical_value == -1) return 0;
			if (DBFWriteLogicalAttribute(handle, record, field, logical_value ? 'T' : 'F')) return 1;
			break;

		default:
			PyErr_Format(PyExc_TypeError, "Invalid field data type %d", type);
			return 0;
		}
	}

	PyErr_Format(PyExc_IOError, "can't write field %d of record %d", field, record);
	return 0;
}

static PyObject* dbffile_write_field(DBFFileObject* self, PyObject* args)
{
	int record, field;
	PyObject* value;
	int type;

	if (!PyArg_ParseTuple(args, "iiO:write_field", &record, &field, &value)) return NULL;

	if (field < 0 || field >= DBFGetFieldCount(self->handle))
	{
		PyErr_Format(PyExc_ValueError,
				"field index %d out of bounds (field count: %d)",
				field, DBFGetFieldCount(self->handle));
		return NULL;
	}

	type = DBFGetFieldInfo(self->handle, field, NULL, NULL, NULL);
	if (!do_write_field(self->handle, record, field, type, value)) return NULL;
	Py_RETURN_NONE;
}

static PyObject* dbffile_write_record(DBFFileObject* self, PyObject* args)
{
	int record;
	PyObject* record_object;
	int i, num_fields;

	int type;
	char name[12];
	PyObject* value = NULL;

	if (!PyArg_ParseTuple(args, "iO:write_record", &record, &record_object)) return NULL;

	num_fields = DBFGetFieldCount(self->handle);

	/* mimic ShapeFile functionality where id = -1 means appending,
	 * i.e. writing at the index just past the last existing record */
	if (record == -1)
	{
		record = DBFGetRecordCount(self->handle);
	}

	if (PySequence_Check(record_object))
	{
		/* It's a sequence object. Iterate through all items in the
		 * sequence and write them to the appropriate field.
		 */
		if (PySequence_Length(record_object) != num_fields)
		{
			PyErr_SetString(PyExc_TypeError, "record must have one item for each field");
			return NULL;
		}
		for (i = 0; i < num_fields; ++i)
		{
			type = DBFGetFieldInfo(self->handle, i, NULL, NULL, NULL);
			value = PySequence_GetItem(record_object, i);
			if (!value) return NULL;
			if (!do_write_field(self->handle, record, i, type, value))
			{
				Py_DECREF(value);
				return NULL;
			}
			Py_DECREF(value);
		}
	}
	else
	{
		/* It's a dictionary-like object. Iterate over the names of the
		 * known fields and write the corresponding item
		 */
		for (i = 0; i < num_fields; ++i)
		{
			name[0] = '\0';
			type = DBFGetFieldInfo(self->handle, i, name, NULL, NULL);
			value = PyDict_GetItemString(record_object, name);
			if (value && !do_write_field(self->handle, record, i, type, value)) return NULL;
		}
	}

	return PyInt_FromLong((long)record);
}

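write_record accepts either a dict keyed by field name (only the named fields are written) or a sequence with exactly one item per field, and record index -1 appends; write_field changes a single cell. A sketch continuing the table created above:

    n = dbf.write_record(-1, {"NAME": "Lake", "AREA": 10.5})   # append, returns the new index
    dbf.write_record(n, ["Lake", 11.0])                        # rewrite it as a sequence
    dbf.write_field(n, 1, 11.25)                               # record n, field 1 (AREA)
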
static PyObject* dbffile_repr(DBFFileObject* self)
{
	/* TODO: it would be nice to do something like "dbflib.DBFFile(filename, mode)" instead */
	return PyString_FromFormat("<dbflib.DBFFile object at %p>", self->handle);
}

/* The commit method implementation
 *
 * The method relies on the DBFUpdateHeader method which is not
 * available in shapelib <= 1.2.10.  setup.py defines
 * HAVE_UPDATE_HEADER's value depending on whether the function is
 * available in the shapelib version the code is compiled with.
 */
#if HAVE_UPDATE_HEADER
static PyObject* dbffile_commit(DBFFileObject* self)
{
	DBFUpdateHeader(self->handle);
	Py_RETURN_NONE;
}
#endif

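Since commit() only exists when the module was compiled against a shapelib that provides DBFUpdateHeader, callers can test the _have_commit flag exported in initdbflib below; a minimal sketch:

    if dbflib._have_commit:
        dbf.commit()        # flushes the header via DBFUpdateHeader
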
static struct PyMethodDef dbffile_methods[] =
{
	{"close", (PyCFunction)dbffile_close, METH_NOARGS,
		"close() -> None\n\n"
		"closes DBFFile"},
	{"field_count", (PyCFunction)dbffile_field_count, METH_NOARGS,
		"field_count() -> integer\n\n"
		"returns number of fields currently defined"},
	{"record_count", (PyCFunction)dbffile_record_count, METH_NOARGS,
		"record_count() -> integer\n\n"
		"returns number of records that currently exist"},
	{"field_info", (PyCFunction)dbffile_field_info, METH_VARARGS,
		"field_info(field_index) -> (type, name, width, decimals)\n\n"
		"returns info of a field as a tuple with:\n"
		"- type: the type of the field corresponding to the integer value of one "
		"of the constants FTString, FTInteger, ...\n"
		"- name: the name of the field as a string\n"
		"- width: the width of the field as a number of characters\n"
		"- decimals: the number of decimal digits" },
	{"add_field", (PyCFunction)dbffile_add_field, METH_VARARGS,
		"add_field(name, type, width, decimals) -> field_index\n\n"
		"adds a new field and returns the field index if successful\n"
		"- name: the name of the field as a string\n"
		"- type: the type of the field corresponding to the integer value of one "
		"of the constants FTString, FTInteger, ...\n"
		"- width: the width of the field as a number of characters\n"
		"- decimals: the number of decimal digits" },
	{"read_attribute", (PyCFunction)dbffile_read_attribute, METH_VARARGS,
		"read_attribute(record_index, field_index) -> value\n\n"
		"returns the value of one field of a record"},
	{"read_record", (PyCFunction)dbffile_read_record, METH_VARARGS,
		"read_record(record_index) -> dict\n\n"
		"returns an entire record as a dictionary of field names and values"},
	{"write_field", (PyCFunction)dbffile_write_field, METH_VARARGS,
		"write_field(record_index, field_index, new_value)\n"
		"writes a single field of a record"},
	{"write_record", (PyCFunction)dbffile_write_record, METH_VARARGS,
		"write_record(record_index, record) -> record_index\n\n"
		"writes an entire record as a dict or a sequence, and returns the index of the record\n"
		"record can either be a dictionary in which case the keys are used as field names, "
		"or a sequence that must have an item for every field (length = field_count())"},
#if HAVE_UPDATE_HEADER
	{"commit", (PyCFunction)dbffile_commit, METH_NOARGS,
		"commit() -> None"},
#endif
	{NULL}
};

static struct PyGetSetDef dbffile_getsetters[] =
{
	{NULL}
};

static PyTypeObject DBFFileType = PYSHAPELIB_DEFINE_TYPE(DBFFileObject, dbffile, "dbflib.DBFFile", 0);

/* --- dbflib -------------------------------------------------------------------------------------------------------- */

static PyObject* dbflib_open(PyObject* module, PyObject* args)
{
	return PyObject_CallObject((PyObject*)&DBFFileType, args);
}

static PyObject* dbflib_create(PyObject* module, PyObject* args)
{
	char* file;
	DBFFileObject* result;
	DBFHandle handle = NULL;
	int wideargument = 0;

#if defined(SHPAPI_HAS_WIDE) && defined(Py_WIN_WIDE_FILENAMES)
	if (GetVersion() < 0x80000000) {    /* On NT, so wide API available */
		PyObject *wfile;
		if (PyArg_ParseTuple(args, "U:create", &wfile))
		{
			wideargument = 1;
			handle = DBFCreateW(PyUnicode_AS_UNICODE(wfile));
			if (!handle)
			{
				PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, wfile);
				return NULL;
			}
		}
		else
		{
			/* Drop the argument parsing error as narrow
			   strings are also valid. */
			PyErr_Clear();
		}
	}
#endif

	if (!handle)
	{
		if (!PyArg_ParseTuple(args, "et:create", Py_FileSystemDefaultEncoding, &file)) return NULL;
		handle = DBFCreate(file);
		if (!handle)
		{
			PyErr_SetFromErrnoWithFilename(PyExc_IOError, file);
			PyMem_Free(file);
			return NULL;
		}
		PyMem_Free(file);
	}

	result = PyObject_New(DBFFileObject, &DBFFileType);
	if (!result)
	{
		DBFClose(handle);
		return PyErr_NoMemory();
	}

	result->handle = handle;
	return (PyObject*) result;
}

static struct PyMethodDef dbflib_methods[] =
{
	{"open", (PyCFunction)dbflib_open, METH_VARARGS,
		"open(name [, mode]) -> DBFFile\n\n"
		"opens a DBFFile" },
	{"create", (PyCFunction)dbflib_create, METH_VARARGS,
		"create(name) -> DBFFile\n\n"
		"creates a DBFFile" },
	{NULL}
};

PyMODINIT_FUNC initdbflib(void)
{
	PyObject* module = Py_InitModule("dbflib", dbflib_methods);
	if (!module) return;

	PYSHAPELIB_ADD_TYPE(DBFFileType, "DBFFile");

	PYSHAPELIB_ADD_CONSTANT(FTString);
	PYSHAPELIB_ADD_CONSTANT(FTInteger);
	PYSHAPELIB_ADD_CONSTANT(FTDouble);
	PYSHAPELIB_ADD_CONSTANT(FTLogical);
	PYSHAPELIB_ADD_CONSTANT(FTInvalid);
	PyModule_AddIntConstant(module, "_have_commit", HAVE_UPDATE_HEADER);
}
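The FT* constants registered here are plain integers; a small helper on the Python side makes field_info() output readable (a sketch, names are illustrative):

    TYPE_NAMES = {dbflib.FTString: "string", dbflib.FTInteger: "integer",
                  dbflib.FTDouble: "double", dbflib.FTLogical: "logical",
                  dbflib.FTInvalid: "invalid"}

    ftype, name, width, decimals = dbf.field_info(0)
    print name, TYPE_NAMES.get(ftype, "unknown"), width, decimals
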
