/[thuban]/branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c

Diff of /branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c


Removed: trunk/thuban/libraries/pyshapelib/dbflib.i, revision 1611 by jan, Tue Aug 19 21:24:20 2003 UTC
Added:   branches/WIP-pyshapelib-bramz/libraries/pyshapelib/dbflibmodule.c, revision 2751 by bramz, Wed Mar 28 23:30:15 2007 UTC
#include "pyshapelib_common.h"

/* SWIG (www.swig.org) interface file for the dbf interface of shapelib
  *  
  * At the moment (Dec 2000) this file is only useful to generate Python  
  * bindings. Invoke swig as follows:  
  *  
  *      swig -python -shadow dbflib.i  
  *  
  * to generate dbflib_wrap.c and dbflib.py. dbflib_wrap.c defines a  
 * bunch of Python-functions that wrap the appropriate dbflib functions
  * and dbflib.py contains an object oriented wrapper around  
  * dbflib_wrap.c.  
  *  
  * This module defines one object type: DBFFile.  
  */  
   
 /* this is the dbflib module */  
 %module dbflib  
   
 /* first a %{,%} block. These blocks are copied verbatim to the  
  * dbflib_wrap.c file and are not parsed by SWIG. This is the place to  
  * import headerfiles and define helper-functions that are needed by the  
  * automatically generated wrappers.  
  */  
/* --- DBFFile ------------------------------------------------------------------------------------------------------- */

typedef struct {
        PyObject_HEAD
        DBFHandle handle;
} DBFFileObject;

%{
#include "shapefil.h"

/* the read_record method. Return the record as a dictionary whose
 * keys are the names of the fields, and their values as the
 * appropriate Python type.
 *
  * In case of error, set a python exception and return NULL. Since that  
  * value will be returned to the python interpreter as is, the  
  * interpreter should recognize the exception.  
  */  
   
 static PyObject *  
 DBFInfo_read_record(DBFInfo * handle, int record)  
 {  
     int num_fields;  
     int i;  
     int type, width;  
     char name[12];  
     PyObject *dict;  
     PyObject *value;  
   
     if (record < 0 || record >= DBFGetRecordCount(handle))  
     {  
         PyErr_Format(PyExc_ValueError,  
                      "record index %d out of bounds (record count: %d)",  
                      record, DBFGetRecordCount(handle));  
         return NULL;  
     }  
    dict = PyDict_New();
    if (!dict)
        return NULL;

    num_fields = DBFGetFieldCount(handle);
    for (i = 0; i < num_fields; i++)
    {
        type = DBFGetFieldInfo(handle, i, name, &width, NULL);
        /* For strings NULL and the empty string are indistinguishable
         * in DBF files. We prefer empty strings instead for backwards
         * compatibility reasons because older wrapper versions returned
         * empty strings as empty strings.
         */
        if (type != FTString && DBFIsAttributeNULL(handle, record, i))
        {
            value = Py_None;
            Py_INCREF(value);
        }
        else
        {
            switch (type)
            {
            case FTString:
            {
                const char * temp = DBFReadStringAttribute(handle, record, i);
                if (temp)
                {
                    value = PyString_FromString(temp);
                }
                else
                {
                    PyErr_Format(PyExc_IOError,
                                 "Can't read value for row %d column %d",
                                 record, i);
                    value = NULL;
                }
                break;
            }
            case FTInteger:
                value = PyInt_FromLong(DBFReadIntegerAttribute(handle, record,
                                                               i));
                break;
            case FTDouble:
                value = PyFloat_FromDouble(DBFReadDoubleAttribute(handle,
                                                                  record, i));
                break;
            default:
                PyErr_Format(PyExc_TypeError, "Invalid field data type %d",
                             type);
                value = NULL;
            }
        }
        if (!value)
            goto fail;
        PyDict_SetItemString(dict, name, value);
        Py_DECREF(value);
    }

    return dict;
 fail:
    Py_XDECREF(dict);
    return NULL;
}


/* allocator
*/
static PyObject* dbffile_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
{
        DBFFileObject* self;
        self = (DBFFileObject*) type->tp_alloc(type, 0);
        self->handle = NULL;
        return (PyObject*) self;
}


/* deallocator
*/
static void dbffile_dealloc(DBFFileObject* self)
{
        DBFClose(self->handle);
        self->handle = NULL;
        self->ob_type->tp_free((PyObject*)self);
}


/* constructor
*/
static int dbffile_init(DBFFileObject* self, PyObject* args, PyObject* kwds)
{
        char* file = NULL;
        char* mode = "rb";
        static char *kwlist[] = {"name", "mode", NULL};

        DBFClose(self->handle);
        self->handle = NULL;

#if defined(SHPAPI_HAS_WIDE) && defined(Py_WIN_WIDE_FILENAMES)
        if (GetVersion() < 0x80000000) {    /* On NT, so wide API available */
                PyObject *wfile;
                if (PyArg_ParseTupleAndKeywords(args, kwds, "U|s:DBFFile", kwlist, &wfile, &mode))
                {
                        PyObject *wmode = PyUnicode_DecodeASCII(mode, strlen(mode), NULL);
                        if (!wmode) return -1;
                        self->handle = DBFOpenW(PyUnicode_AS_UNICODE(wfile), PyUnicode_AS_UNICODE(wmode));
                        Py_DECREF(wmode);
                        if (!self->handle)
                        {
                                PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, wfile);
                                return -1;
                        }
                }
                else
                {
                        /* Drop the argument parsing error as narrow
                           strings are also valid. */
                        PyErr_Clear();
                }
        }
#endif

        if (!self->handle)
        {
                if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|s:DBFFile", kwlist,
                        Py_FileSystemDefaultEncoding, &file, &mode)) return -1;
                self->handle = DBFOpen(file, mode);

                if (!self->handle)
                {
                        PyErr_SetFromErrnoWithFilename(PyExc_IOError, file);
                        PyMem_Free(file);
                        return -1;
                }

                PyMem_Free(file);
        }

        return 0;
}
   
/* the write_record method. Write the record given either as a
  * dictionary or a sequence (i.e. a list or a tuple).  
  *  
  * If it's a dictionary the keys must be the names of the fields and  
  * their value must have a suitable type. Only the fields actually  
  * contained in the dictionary are written. Fields for which there's no  
  * item in the dict are not modified.  
  *  
  * If it's a sequence, all fields must be present in the right order.  
  *  
  * In case of error, set a python exception and return NULL. Since that  
  * value will be returned to the python interpreter as is, the  
  * interpreter should recognize the exception.  
  *  
  * The method is implemented with two c-functions, write_field to write  
  * a single field and DBFInfo_write_record as the front-end.  
  */  
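For illustration, a small Python sketch of the two record forms described above; the file name and field names are hypothetical and assume a table whose only fields are NAME and POPULATION:

    # hypothetical usage of the write_record wrapper described above
    import dbflib

    dbf = dbflib.open("towns.dbf", "r+b")
    # dictionary form: only the named fields are written
    dbf.write_record(0, {"NAME": "Bonn", "POPULATION": 331885})
    # sequence form: one value per field, in field order
    dbf.write_record(1, ["Berlin", 3400000])
    dbf.close()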
   
/* write a single field of a record. */
 static int  
 write_field(DBFHandle handle, int record, int field, int type,  
             PyObject * value)  
 {  
     char * string_value;  
     int int_value;  
     double double_value;  
   
     if (value == Py_None)  
     {  
         if (!DBFWriteNULLAttribute(handle, record, field))  
         {  
             PyErr_Format(PyExc_IOError,  
                          "can't write NULL field %d of record %d",  
                          field, record);  
             return 0;  
         }  
     }  
     else  
     {  
         switch (type)  
         {  
         case FTString:  
             string_value = PyString_AsString(value);  
             if (!string_value)  
                 return 0;  
             if (!DBFWriteStringAttribute(handle, record, field, string_value))  
             {  
                 PyErr_Format(PyExc_IOError,  
                              "can't write field %d of record %d",  
                              field, record);  
                 return 0;  
             }  
             break;  
   
         case FTInteger:  
             int_value = PyInt_AsLong(value);  
             if (int_value == -1 && PyErr_Occurred())  
                 return 0;  
             if (!DBFWriteIntegerAttribute(handle, record, field, int_value))  
             {  
                 PyErr_Format(PyExc_IOError,  
                              "can't write field %d of record %d",  
                              field, record);  
                 return 0;  
             }  
             break;  
   
         case FTDouble:  
             double_value = PyFloat_AsDouble(value);  
             if (double_value == -1 && PyErr_Occurred())  
                 return 0;  
             if (!DBFWriteDoubleAttribute(handle, record, field, double_value))  
             {  
                 PyErr_Format(PyExc_IOError,  
                              "can't write field %d of record %d",  
                              field, record);  
                 return 0;  
             }  
             break;  
   
         default:  
             PyErr_Format(PyExc_TypeError, "Invalid field data type %d", type);  
             return 0;  
         }  
     }  
   
     return 1;  
 }  
   
 static  
 PyObject *  
 DBFInfo_write_record(DBFHandle handle, int record, PyObject *record_object)  
 {  
     int num_fields;  
     int i, length;  
     int type, width;  
     char name[12];  
     PyObject * value = NULL;  
   
     num_fields = DBFGetFieldCount(handle);  
   
     /* We used to use PyMapping_Check to test whether record_object is a  
      * dictionary like object instead of PySequence_Check to test  
      * whether it's a sequence. Unfortunately in Python 2.3  
      * PyMapping_Check returns true for lists and tuples too so the old  
      * approach doesn't work anymore.  
      */  
     if (PySequence_Check(record_object))  
     {  
         /* It's a sequence object. Iterate through all items in the  
          * sequence and write them to the appropriate field.  
          */  
         length = PySequence_Length(record_object);  
         if (length != num_fields)  
         {  
             PyErr_SetString(PyExc_TypeError,  
                             "record must have one item for each field");  
             goto fail;  
         }  
         for (i = 0; i < length; i++)  
         {  
             type = DBFGetFieldInfo(handle, i, name, &width, NULL);  
             value = PySequence_GetItem(record_object, i);  
             if (value)  
             {  
                 if (!write_field(handle, record, i, type, value))  
                     goto fail;  
                 Py_DECREF(value);  
             }  
             else  
             {  
                 goto fail;  
             }  
         }  
     }  
     else  
     {  
         /* It's a dictionary-like object. Iterate over the names of the  
          * known fields and write the corresponding item  
          */  
         for (i = 0; i < num_fields; i++)  
        {
            type = DBFGetFieldInfo(handle, i, name, &width, NULL);

            /* if the dictionary has the key name write that object to
             * the appropriate field, otherwise just clear the python
             * exception and do nothing.
             */
            value = PyMapping_GetItemString(record_object, name);
            if (value)
             {  
                 if (!write_field(handle, record, i, type, value))  
                     goto fail;  
                 Py_DECREF(value);  
             }  
             else  
             {  
                 PyErr_Clear();  
             }  
         }  
     }  
   
     Py_INCREF(Py_None);  
     return Py_None;  
   
  fail:  
     Py_XDECREF(value);  
     return NULL;  
 }  
 %}  
   
   
 /*  
  * The SWIG Interface definition.  
  */  
   
 /* include some common SWIG type definitions and standard exception  
    handling code */  
 %include typemaps.i  
 %include exception.i  
   
 /* As for ShapeFile in shapelib.i, We define a new C-struct that holds  
  * the DBFHandle. This is mainly done so we can separate the close()  
  * method from the destructor but it also helps with exception handling.  
  *  
  * After the DBFFile has been opened or created the handle is not NULL.  
  * The close() method closes the file and sets handle to NULL as an  
  * indicator that the file has been closed.  
  */  
82    
%{
    typedef struct {
        DBFHandle handle;
    } DBFFile;
%}
88    
89    
 /* The first argument to the DBFFile methods is a DBFFile pointer.  
  * We have to check whether handle is not NULL in most methods but not  
  * all. In the destructor and the close method, it's OK for handle to be  
  * NULL. We achieve this by checking whether the preprocessor macro  
  * NOCHECK_$name is defined. SWIG replaces $name with the name of the  
  * function for which the code is inserted. In the %{,%}-block below we  
  * define the macros for the destructor and the close() method.  
  */  
90    
%typemap(python,check) DBFFile *{
%#ifndef NOCHECK_$name
    if (!$target || !$target->handle)
        SWIG_exception(SWIG_TypeError, "dbffile already closed");
%#endif
}

%{
#define NOCHECK_delete_DBFFile
#define NOCHECK_DBFFile_close
%}

static PyObject* dbffile_close(DBFFileObject* self)
{
        DBFClose(self->handle);
        self->handle = NULL;
        Py_RETURN_NONE;
}
98    
99    
static PyObject* dbffile_field_count(DBFFileObject* self)
{
        return PyInt_FromLong((long)DBFGetFieldCount(self->handle));
}

/* An exception handler for the constructor and the module level open()
 * and create() functions.
 *
  * Annoyingly, we *have* to put braces around the SWIG_exception()  
  * calls, at least in the python case, because of the way the macro is  
  * written. Of course, always putting braces around the branches of an  
  * if-statement is often considered good practice.  
  */  
 %typemap(python,except) DBFFile * {  
     $function;  
     if (!$source)  
     {  
         SWIG_exception(SWIG_MemoryError, "no memory");  
     }  
     else if (!$source->handle)  
     {  
         SWIG_exception(SWIG_IOError, "$name failed");  
     }  
}
104    
105    
 /* define and use some typemaps for the field_info() method whose  
  * C-implementation has three output parameters that are returned  
  * through pointers passed into the function. SWIG already has  
  * definitions for common types such as int* and we can use those for  
  * the last two parameters:  
  */  
   
 %apply int * OUTPUT { int * output_width }  
 %apply int * OUTPUT { int * output_decimals }  
106    
/* the fieldname has to be defined manually: */
%typemap(python,ignore) char *fieldname_out(char temp[12]) {
    $target = temp;
}

%typemap(python,argout) char *fieldname_out() {
    PyObject * string = PyString_FromString($source);
    $target = t_output_helper($target,string);
}

static PyObject* dbffile_record_count(DBFFileObject* self)
{
        return PyInt_FromLong((long)DBFGetRecordCount(self->handle));
}


static PyObject* dbffile_field_info(DBFFileObject* self, PyObject* args)
{
        char field_name[12];
        int field, width = 0, decimals = 0, field_type;

        if (!PyArg_ParseTuple(args, "i:field_info", &field)) return NULL;

        field_name[0] = '\0';
        field_type = DBFGetFieldInfo(self->handle, field, field_name, &width, &decimals);

        return Py_BuildValue("isii", field_type, field_name, width, decimals);
}
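As a rough sketch of how field_info() and add_field() are meant to be used from Python (file and field names below are made up, not taken from this code):

    import dbflib

    dbf = dbflib.create("example.dbf")
    # add_field(name, type, width, decimals) -> index of the new field
    dbf.add_field("NAME", dbflib.FTString, 20, 0)
    dbf.add_field("AREA", dbflib.FTDouble, 12, 3)
    # field_info(index) -> (type, name, width, decimals)
    print dbf.field_info(1)   # e.g. (2, 'AREA', 12, 3); 2 is the integer value of FTDouble
    dbf.close()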
126    
127    
128    
/*
 * The SWIG-version of the DBFFile struct
 */

typedef struct
{
    %addmethods {
        DBFFile(const char *file, const char * mode = "rb") {
            DBFFile * self = malloc(sizeof(DBFFile));
            if (self)
                self->handle = DBFOpen(file, mode);
            return self;
        }

        ~DBFFile() {
            if (self->handle)
                DBFClose(self->handle);
            free(self);
        }

        void close() {
            if (self->handle)
                DBFClose(self->handle);
            self->handle = NULL;
        }

        int field_count() {
            return DBFGetFieldCount(self->handle);
        }

        int record_count() {
            return DBFGetRecordCount(self->handle);
        }

        int field_info(int iField, char * fieldname_out,
                       int * output_width, int * output_decimals) {
            return DBFGetFieldInfo(self->handle, iField, fieldname_out,
                                   output_width, output_decimals);
        }

        PyObject * read_record(int record) {
            return DBFInfo_read_record(self->handle, record);
        }

        int add_field(const char * pszFieldName, DBFFieldType eType,
                      int nWidth, int nDecimals) {
            return DBFAddField(self->handle, pszFieldName, eType, nWidth,
                               nDecimals);
        }

        PyObject *write_record(int record, PyObject *dict_or_sequence) {
            return DBFInfo_write_record(self->handle, record,
                                        dict_or_sequence);
        }

        int commit() {
            return DBFCommit(self->handle);
        }
    }
} DBFFile;


static PyObject* dbffile_add_field(DBFFileObject* self, PyObject* args)
{
        char* name;
132            int type, width, decimals;
133            int field;
134            
135            if (!PyArg_ParseTuple(args, "siii:add_field", &name, &type, &width, &decimals)) return NULL;
136            
137            field = DBFAddField(self->handle, name, (DBFFieldType)type, width, decimals);
138            
139            if (field < 0)
140            {
141                    PyErr_SetString(PyExc_ValueError, "Failed to add field due to inappropriate field definition");
142                    return NULL;
143            }
144            return PyInt_FromLong((long)field);
145    }
146    
147    
148    
/* Read one attribute from the dbf handle and return it as a new python object
150    *
151    * If an error occurs, set the appropriate Python exception and return
152    * NULL.
153    *
154    * Assume that the values of the record and field arguments are valid.
155    * The name argument will be passed to DBFGetFieldInfo as is and should
156    * thus be either NULL or a pointer to an array of at least 12 chars
157    */
158    static PyObject* do_read_attribute(DBFHandle handle, int record, int field, char * name)
{
        int type, width;
        const char* temp;
        type = DBFGetFieldInfo(handle, field, name, &width, NULL);

        /* For strings NULL and the empty string are indistinguishable
        * in DBF files. We prefer empty strings instead for backwards
166            * compatibility reasons because older wrapper versions returned
        * empty strings as empty strings.
168            */
169            if (type != FTString && DBFIsAttributeNULL(handle, record, field))
170            {
171                    Py_RETURN_NONE;
        }
        else
        {
                switch (type)
                {
                case FTString:
178                            temp = DBFReadStringAttribute(handle, record, field);
179                            if (temp) return PyString_FromString(temp);
180    
181                    case FTInteger:
182                            return PyInt_FromLong((long)DBFReadIntegerAttribute(handle, record, field));
183    
184                    case FTDouble:
185                            return PyFloat_FromDouble(DBFReadDoubleAttribute(handle, record, field));
186                            
187                    case FTLogical:
188                            temp = DBFReadLogicalAttribute(handle, record, field);
189                            if (temp)
190                            {
191                                    switch (temp[0])
192                                    {
193                                    case 'F':
194                                    case 'N':
195                                            Py_RETURN_FALSE;
196                                    case 'T':
197                                    case 'Y':
198                                            Py_RETURN_TRUE;
199                                    }
200                            }
201                            break;
202    
203                    default:
204                            PyErr_Format(PyExc_TypeError, "Invalid field data type %d", type);
205                            return NULL;
206                    }
        }
208            
209            PyErr_Format(PyExc_IOError,     "Can't read value for row %d column %d", record, field);
210            return NULL;
211    }    
212    
213    
214    
/* the read_attribute method. Return the value of the given record and
 * field as a python object of the appropriate type.
 */
static PyObject* dbffile_read_attribute(DBFFileObject* self, PyObject* args)
219    {
220            int record, field;
221    
222            if (!PyArg_ParseTuple(args, "ii:read_field", &record, &field)) return NULL;
223            
224            if (record < 0 || record >= DBFGetRecordCount(self->handle))
225            {
226                    PyErr_Format(PyExc_ValueError,
227                                    "record index %d out of bounds (record count: %d)",
228                                    record, DBFGetRecordCount(self->handle));
229                    return NULL;
        }
231    
        if (field < 0 || field >= DBFGetFieldCount(self->handle))
        {
234                    PyErr_Format(PyExc_ValueError,
235                                    "field index %d out of bounds (field count: %d)",
236                                    field, DBFGetFieldCount(self->handle));
237                    return NULL;
        }
239    
        return do_read_attribute(self->handle, record, field, NULL);
}
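A hedged Python sketch of what read_attribute() returns for different field types, following the conversion rules in do_read_attribute above (file name and indices are illustrative):

    import dbflib

    dbf = dbflib.open("example.dbf")
    v = dbf.read_attribute(0, 0)   # FTString field -> Python string ("" for NULL)
    n = dbf.read_attribute(0, 1)   # NULL in a non-string field -> None
    b = dbf.read_attribute(0, 2)   # FTLogical field -> True or False
    dbf.close()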
242    
243    
244    
/* the read_record method. Return the record as a dictionary whose
 * keys are the names of the fields, and their values as the
247    * appropriate Python type.
248    */
249    static PyObject* dbffile_read_record(DBFFileObject* self, PyObject* args)
250    {
251            int record;
252            int num_fields;
253            int i;
254            char name[12];
255            PyObject *dict;
256            PyObject *value = NULL;
257    
258            if (!PyArg_ParseTuple(args, "i:read_record", &record)) return NULL;
259    
260            if (record < 0 || record >= DBFGetRecordCount(self->handle))
261            {
262                    PyErr_Format(PyExc_ValueError,
263                            "record index %d out of bounds (record count: %d)",
264                            record, DBFGetRecordCount(self->handle));
265                    return NULL;
        }
267    
        dict = PyDict_New();
        if (!dict) return NULL;

        num_fields = DBFGetFieldCount(self->handle);
272            for (i = 0; i < num_fields; i++)
273            {
274                    value = do_read_attribute(self->handle, record, i, name);
275                    if (!value || PyDict_SetItemString(dict, name, value) < 0) goto fail;
276                    Py_DECREF(value);
277                    value = NULL;
        }
279                
        return dict;

282    fail:
283            Py_XDECREF(value);
284            Py_DECREF(dict);
285            return NULL;
286    }
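Illustrative Python use of read_record(), assuming hypothetical field names:

    import dbflib

    dbf = dbflib.open("example.dbf")
    record = dbf.read_record(0)
    # e.g. {'NAME': 'Bonn', 'AREA': 141.06} -- one key per field
    for field_name, value in record.items():
        print field_name, value
    dbf.close()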
287    
288    
289    
290    /* write a single field of a record. */
291    static int do_write_field(DBFHandle handle, int record, int field, int type, PyObject* value)
292    {
293            char * string_value;
294            int int_value;
295            double double_value;
296            int logical_value;
297    
298            if (value == Py_None)
299            {
300                    if (DBFWriteNULLAttribute(handle, record, field)) return 1;
        }
302            else
303            {
304                    switch (type)
305                    {
306                    case FTString:
307                            string_value = PyString_AsString(value);
308                            if (!string_value) return 0;
309                            if (DBFWriteStringAttribute(handle, record, field, string_value)) return 1;
310                            break;
311    
312                    case FTInteger:
313                            int_value = PyInt_AsLong(value);
314                            if (int_value == -1 && PyErr_Occurred()) return 0;
315                            if (DBFWriteIntegerAttribute(handle, record, field, int_value)) return 1;
316                            break;
317    
318                    case FTDouble:
319                            double_value = PyFloat_AsDouble(value);
320                            if (double_value == -1 && PyErr_Occurred()) return 0;
321                            if (DBFWriteDoubleAttribute(handle, record, field, double_value)) return 1;
322                            break;
323                            
324                    case FTLogical:
325                            logical_value = PyObject_IsTrue(value);
326                            if (logical_value == -1) return 0;
327                            if (DBFWriteLogicalAttribute(handle, record, field, logical_value ? 'T' : 'F')) return 1;
328                            break;
329    
330                    default:
331                            PyErr_Format(PyExc_TypeError, "Invalid field data type %d", type);
332                            return 0;
333                    }
334            }
335    
336            PyErr_Format(PyExc_IOError,     "can't write field %d of record %d", field, record);
337            return 0;
338    }
339    


static PyObject* dbffile_write_field(DBFFileObject* self, PyObject* args)
{
344            int record, field;
345            PyObject* value;
346            int type;
347    
348            if (!PyArg_ParseTuple(args, "iiO:write_field", &record, &field, &value)) return NULL;
349            
350            if (field < 0 || field >= DBFGetFieldCount(self->handle))
351            {
352                    PyErr_Format(PyExc_ValueError,
353                                    "field index %d out of bounds (field count: %d)",
354                                    field, DBFGetFieldCount(self->handle));
355                    return NULL;
        }
357    
        type = DBFGetFieldInfo(self->handle, field, NULL, NULL, NULL);
        if (!do_write_field(self->handle, record, field, type, value)) return NULL;
        Py_RETURN_NONE;
361    }
362    
363    
364    
365    static PyObject* dbffile_write_record(DBFFileObject* self, PyObject* args)
366    {
367            int record;
368            PyObject* record_object;
369            int i, num_fields;
370            
371            int type;
372            char name[12];
373            PyObject* value = NULL;
374            
375            if (!PyArg_ParseTuple(args, "iO:write_record", &record, &record_object)) return NULL;
376            
377            num_fields = DBFGetFieldCount(self->handle);
378            
379            /* mimic ShapeFile functionality where id = -1 means appending */
380            if (record == -1)
381            {
382                    record = num_fields;
        }
384    
        if (PySequence_Check(record_object))
        {
387                    /* It's a sequence object. Iterate through all items in the
388                    * sequence and write them to the appropriate field.
389                    */
390                    if (PySequence_Length(record_object) != num_fields)
391                    {
392                            PyErr_SetString(PyExc_TypeError, "record must have one item for each field");
393                            return NULL;
394                    }
395                    for (i = 0; i < num_fields; ++i)
396                    {
397                            type = DBFGetFieldInfo(self->handle, i, NULL, NULL, NULL);
398                            value = PySequence_GetItem(record_object, i);
399                            if (!value) return NULL;
400                            if (!do_write_field(self->handle, record, i, type, value))
401                            {
402                                    Py_DECREF(value);
403                                    return NULL;
404                            }
405                            Py_DECREF(value);
406                    }
        }
408            else
409            {
410                    /* It's a dictionary-like object. Iterate over the names of the
411                    * known fields and write the corresponding item
412                    */
413                    for (i = 0; i < num_fields; ++i)
414                    {
415                            name[0] = '\0';
416                            type = DBFGetFieldInfo(self->handle, i, name, NULL, NULL);
417                            value = PyDict_GetItemString(record_object, name);
418                            if (value && !do_write_field(self->handle, record, i, type, value)) return NULL;
419                    }
420            }
421            
422            return PyInt_FromLong((long)record);
423    }
424    
425    
426    
static PyObject* dbffile_repr(DBFFileObject* self)
{
        /* TODO: it would be nice to do something like "dbflib.DBFFile(filename, mode)" instead */
        return PyString_FromFormat("<dbflib.DBFFile object at %p>", self->handle);
}

/*
 * Two module level functions, open() and create() that correspond to
 * DBFOpen and DBFCreate respectively. open() is equivalent to the
 * DBFFile constructor.
 */
432    
433    
 %{  
     DBFFile * open_DBFFile(const char * file, const char * mode)  
     {  
         DBFFile * self = malloc(sizeof(DBFFile));  
         if (self)  
             self->handle = DBFOpen(file, mode);  
         return self;  
     }  
 %}  
434    
%name(open) %new DBFFile * open_DBFFile(const char * file,
                                        const char * mode = "rb");

/* The commit method implementation
 *
437    * The method relies on the DBFUpdateHeader method which is not
438    * available in shapelib <= 1.2.10.  setup.py defines
439    * HAVE_UPDATE_HEADER's value depending on whether the function is
440    * available in the shapelib version the code is compiled with.
441    */
442    #if HAVE_UPDATE_HEADER
443    static PyObject* dbffile_commit(DBFFileObject* self)
444    {
445            DBFUpdateHeader(self->handle);
446            Py_RETURN_NONE;
447    }
448    #endif
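Since commit() only exists when the module was built against a shapelib that provides DBFUpdateHeader, Python callers can test the module-level _have_commit flag (set in initdbflib below); a hedged sketch:

    import dbflib

    dbf = dbflib.open("example.dbf", "r+b")
    dbf.write_field(0, 0, "updated")
    if dbflib._have_commit:
        dbf.commit()   # flushes the header via DBFUpdateHeader
    dbf.close()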
449    
450    
451    
452    static struct PyMethodDef dbffile_methods[] =
453    {
454            {"close", (PyCFunction)dbffile_close, METH_NOARGS,
455                    "close() -> None\n\n"
456                    "closes DBFFile"},
457            {"field_count", (PyCFunction)dbffile_field_count, METH_NOARGS,
458                    "field_count() -> integer\n\n"
459                    "returns number of fields currently defined"},
460            {"record_count", (PyCFunction)dbffile_record_count, METH_NOARGS,
461                    "record_count() -> integer\n\n"
462                    "returns number of records that currently exist"},
463            {"field_info", (PyCFunction)dbffile_field_info, METH_VARARGS,
464                    "field_info(field_index) -> (type, name, width, decimals)\n\n"
465                    "returns info of a field as a tuple with:\n"
466                    "- type: the type of the field corresponding to the integer value of one "
467                    " of the constants FTString, FTInteger, ...\n"
468                    "- name: the name of the field as a string\n"
469                    "- width: the width of the field as a number of characters\n"
470                    "- decimals: the number of decimal digits" },
471            {"add_field", (PyCFunction)dbffile_add_field, METH_VARARGS,
                "add_field(name, type, width, decimals) -> field_index\n\n"
473                    "adds a new field and returns field index if successful\n"
474                    "- type: the type of the field corresponding to the integer value of one "
475                    " of the constants FTString, FTInteger, ...\n"
476                    "- name: the name of the field as a string\n"
477                    "- width: the width of the field as a number of characters\n"
478                    "- decimals: the number of decimal digits" },
479            {"read_attribute", (PyCFunction)dbffile_read_attribute, METH_VARARGS,
480                    "read_attribute(record_index, field_index) -> value\n\n"
481                    "returns the value of one field of a record"},
482            {"read_record", (PyCFunction)dbffile_read_record, METH_VARARGS,
483                    "read_record(record_index) -> dict\n\n"
484                    "returns an entire record as a dictionary of field names and values"},
485            {"write_field", (PyCFunction)dbffile_write_field, METH_VARARGS,
486                    "write_field(record_index, field_index, new_value)\n"
487                    "writes a single field of a record"},
488            {"write_record", (PyCFunction)dbffile_write_record, METH_VARARGS,
489                    "write_record(record_index, record) -> record_index\n\n"
                "Writes an entire record as a dict or a sequence, and returns the index of the record\n"
491                    "Record can either be a dictionary in which case the keys are used as field names, "
492                    "or a sequence that must have an item for every field (length = field_count())"},
493    #if HAVE_UPDATE_HEADER
494            {"commit", (PyCFunction)dbffile_commit, METH_NOARGS,
495                    "commit() -> None"},
496    #endif
497            {NULL}
498    };
499    
500    
501    
502    static struct PyGetSetDef dbffile_getsetters[] =
503    {
504            {NULL}
505    };
506    
507    
508    
509    static PyTypeObject DBFFileType = PYSHAPELIB_DEFINE_TYPE(DBFFileObject, dbffile, "shapelib.DBFFile", 0);
510    
511    
512    
513    /* --- dbflib -------------------------------------------------------------------------------------------------------- */
514    
515    static PyObject* dbflib_open(PyObject* module, PyObject* args)
516    {
517            return PyObject_CallObject((PyObject*)&DBFFileType, args);
518    }
519    
520    
521    
522    static PyObject* dbflib_create(PyObject* module, PyObject* args)
523    {
524            char* file;
525            DBFFileObject* result;
526            DBFHandle handle = NULL;
527            int wideargument = 0;
528    
529    #if defined(SHPAPI_HAS_WIDE) && defined(Py_WIN_WIDE_FILENAMES)
530            if (GetVersion() < 0x80000000) {    /* On NT, so wide API available */
531                    PyObject *wfile;
532                    if (PyArg_ParseTuple(args, "U:create", &wfile))
533                    {
534                            wideargument = 1;
535                            handle = DBFCreateW(PyUnicode_AS_UNICODE(wfile));
536                            if (!handle)
537                            {
538                                    PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, wfile);
539                                    return NULL;
540                            }
541                    }
542                    else
543                    {
544                            /* Drop the argument parsing error as narrow
545                               strings are also valid. */
546                            PyErr_Clear();
547                    }
548            }
549    #endif
550            
551            if (!handle)
552            {
553                    if (!PyArg_ParseTuple(args, "et:create", Py_FileSystemDefaultEncoding, &file)) return NULL;
554                    handle = DBFCreate(file);
555                    if (!handle)
556                    {
557                                    PyErr_SetFromErrnoWithFilename(PyExc_IOError, file);
558                                    PyMem_Free(file);
559                                    return NULL;
560                    }
561                    PyMem_Free(file);
562            }
563    
564            result = PyObject_New(DBFFileObject, &DBFFileType);
565            if (!result)
566            {
567                    DBFClose(handle);
568                    return PyErr_NoMemory();
569            }
570            
571            result->handle = handle;
572            return (PyObject*) result;
573    }
574    
575    
 %{  
     DBFFile * create_DBFFile(const char * file)  
     {  
         DBFFile * self = malloc(sizeof(DBFFile));  
         if (self)  
             self->handle = DBFCreate(file);  
         return self;  
     }  
 %}  
 %name(create) %new DBFFile * create_DBFFile(const char * file);  
576    
577    static struct PyMethodDef dbflib_methods[] =
578    {
579            {"open", (PyCFunction)dbflib_open, METH_VARARGS,
580                    "open(name [, mode]) -> DBFFile\n\n"
581                    "opens a DBFFile" },
582            {"create", (PyCFunction)dbflib_create, METH_VARARGS,
583                    "create(name) -> DBFFile\n\n"
584                    "create a DBFFile" },
585            {NULL}
586    };
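Putting the module-level functions together, a minimal round trip might look like the following sketch (file and field names are invented; exact behaviour depends on the shapelib the module is compiled against):

    import dbflib

    dbf = dbflib.create("new.dbf")
    dbf.add_field("NAME", dbflib.FTString, 20, 0)
    dbf.add_field("COUNT", dbflib.FTInteger, 10, 0)
    dbf.write_record(0, {"NAME": "first", "COUNT": 1})
    dbf.close()

    dbf = dbflib.open("new.dbf")
    print dbf.record_count(), dbf.read_record(0)
    dbf.close()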
587    
588    
/* constant definitions copied from shapefil.h */
typedef enum {
  FTString,
  FTInteger,
  FTDouble,
  FTInvalid
} DBFFieldType;


PyMODINIT_FUNC initdbflib(void)
{
        PyObject* module = Py_InitModule("dbflib", dbflib_methods);
        if (!module) return;

        PYSHAPELIB_ADD_TYPE(DBFFileType, "DBFFile");
596            
597            PYSHAPELIB_ADD_CONSTANT(FTString);
598            PYSHAPELIB_ADD_CONSTANT(FTInteger);
599            PYSHAPELIB_ADD_CONSTANT(FTDouble);
600            PYSHAPELIB_ADD_CONSTANT(FTLogical);
601            PYSHAPELIB_ADD_CONSTANT(FTInvalid);
602            PyModule_AddIntConstant(module, "_have_commit", HAVE_UPDATE_HEADER);
603    }
