/* XXX: cache_keyring_names must be called when the GPG homedir changes! */


/* Initialize both cache contexts. Use @pubring for the public
   keyring and @secring for the secret keyring. */
gpgme_error_t
keycache_init (const char *pubring, const char *secring)
{
    struct progress_filter_s pfx;
    gpgme_error_t err;
    int val = 0;
    char *p;

    if (secring != NULL) {
        free_if_alloc (gpg_secring);
        /* ... */

    if (val != 0)
        progress_cleanup (&pfx);
    return err;
}
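
/* Usage sketch (illustrative, not part of the original file): a caller
   passes the two keyring paths it has already resolved.  The file names
   below are hypothetical examples, not paths the code mandates. */
static gpgme_error_t
example_init_caches (void)
{
    /* A non-zero gpgme_error_t signals failure, as usual for
       GPGME-style APIs. */
    return keycache_init ("pubring.gpg", "secring.gpg");
}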


/* If @val is 1, mark the key cache to be reloaded. */
void
keycache_set_reload (int val)
{
    reload = val;
}


/* Return the reload cache flag. */
int
keycache_get_reload (void)
{
    return reload;
}
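
/* Illustrative sketch (not part of the original file): one way a caller
   might honor the reload flag.  Re-running keycache_init with the same
   keyring paths is an assumption about how a refresh would look, not
   code taken from the original source. */
static gpgme_error_t
example_refresh_if_requested (const char *pubring, const char *secring)
{
    gpgme_error_t err = 0;

    if (keycache_get_reload ()) {
        err = keycache_init (pubring, secring);
        if (!err)
            keycache_set_reload (0);    /* reload handled */
    }
    return err;
}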


/* Return the public cache context if @is_pub is set,
   the secret cache context otherwise. */
gpg_keycache_t
keycache_get_ctx (int is_pub)
{
    /* ... */
}
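
/* Illustrative only (not part of the original file): fetching both cache
   contexts.  Whether they can be NULL before keycache_init() has run is
   an assumption, hence the defensive check. */
static int
example_have_caches (void)
{
    gpg_keycache_t pubctx = keycache_get_ctx (1);   /* public keyring */
    gpg_keycache_t secctx = keycache_get_ctx (0);   /* secret keyring */

    return pubctx != NULL && secctx != NULL;
}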


/* Search the cache for the public key with keyid @keyid and
   return the item in @k. */
int
winpt_get_pubkey (const char *keyid, winpt_key_s *k)
{
    /* ... */
    rc = get_key_from_cache (keyid, &k->ctx, &k->ext, 0);
    if (rc)
        return rc;
    k->is_v3 = k->ctx->subkeys->pubkey_algo == GPGME_PK_RSA &&
               strlen (k->ctx->subkeys->fpr) == 32;
    k->is_protected = k->ext->gloflags.is_protected;
    k->keyid = k->ctx->subkeys->keyid;
    k->uid = k->ctx->uids->uid;

    /* ... */

    rc = get_key_from_cache (keyid, &k->ctx, &k->ext, 1);
    if (rc)
        return rc;
    k->is_v3 = k->ctx->subkeys->pubkey_algo == GPGME_PK_RSA &&
               strlen (k->ctx->subkeys->fpr) == 32;
    k->is_protected = k->ext->gloflags.is_protected;
    k->keyid = k->ctx->subkeys->keyid;
    k->uid = k->ctx->uids->uid;

    /* ... */

int
get_pubkey (const char *keyid, gpgme_key_t *ret_key)
{
    int rc;

    if (pub && sec)
        rc = get_key_from_cache (keyid, ret_key, NULL, 0);

    /* ... */

int
get_seckey (const char *keyid, gpgme_key_t *ret_skey)
{
    int rc;

    if (pub && sec)
        rc = get_key_from_cache (keyid, ret_skey, NULL, 1);