Author: mlschroe
Date: Thu Oct 9 14:47:05 2008
New Revision: 11279
URL: http://svn.opensuse.org/viewcvs/zypp?rev=11279&view=rev
Log:
- the big solv data change
- incompatible new file format
- repodata handles are solvable ids
- no more extra handles
- no need to call repodata_extend anymore
- work around solver dup repo priority bug, real fix follows soon
Added:
trunk/sat-solver/src/repopage.c
trunk/sat-solver/src/repopage.h
Modified:
trunk/sat-solver/src/CMakeLists.txt
trunk/sat-solver/src/dirpool.c
trunk/sat-solver/src/dirpool.h
trunk/sat-solver/src/knownid.h
trunk/sat-solver/src/pool.c
trunk/sat-solver/src/pooltypes.h
trunk/sat-solver/src/repo.c
trunk/sat-solver/src/repo.h
trunk/sat-solver/src/repo_helix.c
trunk/sat-solver/src/repo_helix.h
trunk/sat-solver/src/repo_solv.c
trunk/sat-solver/src/repodata.c
trunk/sat-solver/src/repodata.h
trunk/sat-solver/src/repopack.h
trunk/sat-solver/src/solvable.c
trunk/sat-solver/src/solver.c
trunk/sat-solver/testsuite/deptestomatic.c
trunk/sat-solver/testsuite/yps.c
trunk/sat-solver/tools/common_write.c
trunk/sat-solver/tools/dumpsolv.c
trunk/sat-solver/tools/helix2solv.c
trunk/sat-solver/tools/mergesolv.c
trunk/sat-solver/tools/repo_content.c
trunk/sat-solver/tools/repo_content.h
trunk/sat-solver/tools/repo_deltainfoxml.c
trunk/sat-solver/tools/repo_deltainfoxml.h
trunk/sat-solver/tools/repo_patchxml.c
trunk/sat-solver/tools/repo_patchxml.h
trunk/sat-solver/tools/repo_products.c
trunk/sat-solver/tools/repo_products.h
trunk/sat-solver/tools/repo_repomdxml.c
trunk/sat-solver/tools/repo_repomdxml.h
trunk/sat-solver/tools/repo_rpmdb.c
trunk/sat-solver/tools/repo_rpmdb.h
trunk/sat-solver/tools/repo_rpmmd.c
trunk/sat-solver/tools/repo_rpmmd.h
trunk/sat-solver/tools/repo_susetags.c
trunk/sat-solver/tools/repo_susetags.h
trunk/sat-solver/tools/repo_updateinfoxml.c
trunk/sat-solver/tools/repo_updateinfoxml.h
trunk/sat-solver/tools/repo_write.c
trunk/sat-solver/tools/repo_write.h
trunk/sat-solver/tools/repo_zyppdb.c
trunk/sat-solver/tools/rpmdb2solv.c
trunk/sat-solver/tools/rpms2solv.c
trunk/sat-solver/tools/susetags2solv.c
Modified: trunk/sat-solver/src/CMakeLists.txt
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/CMakeLists.txt?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/CMakeLists.txt (original)
+++ trunk/sat-solver/src/CMakeLists.txt Thu Oct 9 14:47:05 2008
@@ -2,14 +2,14 @@
SET(libsatsolver_SRCS
bitmap.c poolarch.c poolvendor.c poolid.c strpool.c dirpool.c
solver.c solverdebug.c repo_solv.c repo_helix.c evr.c pool.c
- queue.c repo.c repodata.c util.c policy.c fastlz.c solvable.c)
+ queue.c repo.c repodata.c repopage.c util.c policy.c solvable.c)
ADD_LIBRARY(satsolver STATIC ${libsatsolver_SRCS})
SET(libsatsolver_HEADERS
bitmap.h evr.h hash.h policy.h poolarch.h poolvendor.h pool.h
poolid.h pooltypes.h queue.h solvable.h solver.h solverdebug.h
- repo.h repodata.h repo_solv.h repo_helix.h util.h
+ repo.h repodata.h repopage.h repo_solv.h repo_helix.h util.h
strpool.h dirpool.h knownid.h)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
Modified: trunk/sat-solver/src/dirpool.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/dirpool.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/dirpool.c (original)
+++ trunk/sat-solver/src/dirpool.c Thu Oct 9 14:47:05 2008
@@ -21,6 +21,13 @@
}
void
+dirpool_free(Dirpool *dp)
+{
+ sat_free(dp->dirs);
+ sat_free(dp->dirtraverse);
+}
+
+void
dirpool_make_dirtraverse(Dirpool *dp)
{
Id parent, i, *dirtraverse;
Modified: trunk/sat-solver/src/dirpool.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/dirpool.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/dirpool.h (original)
+++ trunk/sat-solver/src/dirpool.h Thu Oct 9 14:47:05 2008
@@ -18,6 +18,8 @@
} Dirpool;
void dirpool_create(Dirpool *dp);
+void dirpool_free(Dirpool *dp);
+
void dirpool_make_dirtraverse(Dirpool *dp);
Id dirpool_add_dir(Dirpool *dp, Id parent, Id comp, int create);
Modified: trunk/sat-solver/src/knownid.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/knownid.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/knownid.h (original)
+++ trunk/sat-solver/src/knownid.h Thu Oct 9 14:47:05 2008
@@ -53,12 +53,19 @@
KNOWNID(ARCH_SRC, "src"),
KNOWNID(ARCH_NOSRC, "nosrc"),
KNOWNID(ARCH_NOARCH, "noarch"),
-KNOWNID(REPODATA_INFO, "repodata:info"),
-KNOWNID(REPODATA_EXTERNAL, "repodata:external"),
-KNOWNID(REPODATA_KEYS, "repodata:keys"),
-KNOWNID(REPODATA_LOCATION, "repodata:location"),
-KNOWNID(REPODATA_ADDEDFILEPROVIDES, "repodata:addedfileprovides"),
-KNOWNID(REPODATA_RPMDBCOOKIE, "repodata:rpmdbcookie"),
+
+KNOWNID(REPOSITORY_SOLVABLES, "repository:solvables"),
+KNOWNID(REPOSITORY_DELTAINFO, "repository:deltainfo"),
+
+/* sub-repository information, they will get loaded on demand */
+KNOWNID(REPOSITORY_EXTERNAL, "repository:external"),
+KNOWNID(REPOSITORY_KEYS, "repository:keys"),
+KNOWNID(REPOSITORY_LOCATION, "repository:location"),
+
+/* file provides already added to our solvables */
+KNOWNID(REPOSITORY_ADDEDFILEPROVIDES, "repository:addedfileprovides"),
+/* inode of the rpm database for rpm --rebuilddb detection */
+KNOWNID(REPOSITORY_RPMDBCOOKIE, "repository:rpmdbcookie"),
/* The void type is usable to encode one-valued attributes, they have
no associated data. This is useful to encode values which many solvables
@@ -81,7 +88,8 @@
KNOWNID(REPOKEY_TYPE_MD5, "repokey:type:md5"),
KNOWNID(REPOKEY_TYPE_SHA1, "repokey:type:sha1"),
KNOWNID(REPOKEY_TYPE_SHA256, "repokey:type:sha256"),
-KNOWNID(REPOKEY_TYPE_COUNTED, "repokey:type:counted"),
+KNOWNID(REPOKEY_TYPE_FIXARRAY, "repokey:type:fixarray"),
+KNOWNID(REPOKEY_TYPE_FLEXARRAY, "repokey:type:flexarray"),
KNOWNID(SOLVABLE_SUMMARY, "solvable:summary"),
KNOWNID(SOLVABLE_DESCRIPTION, "solvable:description"),
@@ -121,37 +129,38 @@
KNOWNID(SOLVABLE_PATCHCATEGORY, "solvable:patchcategory"),
KNOWNID(SOLVABLE_HEADEREND, "solvable:headerend"),
+/* stuff for solvables of type pattern */
KNOWNID(SOLVABLE_CATEGORY, "solvable:category"),
KNOWNID(SOLVABLE_INCLUDES, "solvable:includes"),
KNOWNID(SOLVABLE_EXTENDS, "solvable:extends"),
KNOWNID(SOLVABLE_ICON, "solvable:icon"),
KNOWNID(SOLVABLE_ORDER, "solvable:order"),
-KNOWNID(UPDATE_REBOOT, "update:reboot"), /* reboot suggested (kernel update) */
-KNOWNID(UPDATE_RESTART, "update:restart"), /* restart suggested (update stack update) */
-KNOWNID(UPDATE_RELOGIN, "update:relogin"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_REBOOT, "update:reboot"), /* reboot suggested (kernel update) */
+KNOWNID(UPDATE_RESTART, "update:restart"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_RELOGIN, "update:relogin"), /* relogin suggested (update stack update) */
-KNOWNID(UPDATE_MESSAGE, "update:message"), /* restart suggested (update stack update) */
+KNOWNID(UPDATE_MESSAGE, "update:message"), /* message to present to the user for this update */
/* 'content' of patch, usually list of packages */
-KNOWNID(UPDATE_COLLECTION, "update:collection"), /* "name evr arch" */
-KNOWNID(UPDATE_COLLECTION_NAME, "update:collection:name"), /* name */
-KNOWNID(UPDATE_COLLECTION_EVR, "update:collection:evr"), /* epoch:version-release */
-KNOWNID(UPDATE_COLLECTION_ARCH, "update:collection:arch"), /* architecture */
-KNOWNID(UPDATE_COLLECTION_FILENAME, "update:collection:filename"), /* filename (of rpm) */
-KNOWNID(UPDATE_COLLECTION_FLAGS, "update:collection:flags"), /* reboot(1)/restart(2) suggested if this rpm gets updated */
-
- /* external references for the update */
-KNOWNID(UPDATE_REFERENCE_TYPE, "update:reference:type"), /* type, e.g. 'bugzilla' or 'cve' */
-KNOWNID(UPDATE_REFERENCE_HREF, "update:reference:href"), /* href, e.g. 'http://bugzilla...' */
-KNOWNID(UPDATE_REFERENCE_ID, "update:reference:id"), /* id, e.g. bug number */
-KNOWNID(UPDATE_REFERENCE_TITLE, "update:reference:title"), /* title, e.g. "the bla forz scribs on fuggle" */
+KNOWNID(UPDATE_COLLECTION, "update:collection"), /* "name evr arch" */
+KNOWNID(UPDATE_COLLECTION_NAME, "update:collection:name"), /* name */
+KNOWNID(UPDATE_COLLECTION_EVR, "update:collection:evr"), /* epoch:version-release */
+KNOWNID(UPDATE_COLLECTION_ARCH, "update:collection:arch"), /* architecture */
+KNOWNID(UPDATE_COLLECTION_FILENAME, "update:collection:filename"), /* filename (of rpm) */
+KNOWNID(UPDATE_COLLECTION_FLAGS, "update:collection:flags"), /* reboot(1)/restart(2) suggested if this rpm gets updated */
+
+KNOWNID(UPDATE_REFERENCE, "update:reference"), /* external references for the update */
+KNOWNID(UPDATE_REFERENCE_TYPE, "update:reference:type"), /* type, e.g. 'bugzilla' or 'cve' */
+KNOWNID(UPDATE_REFERENCE_HREF, "update:reference:href"), /* href, e.g. 'http://bugzilla...' */
+KNOWNID(UPDATE_REFERENCE_ID, "update:reference:id"), /* id, e.g. bug number */
+KNOWNID(UPDATE_REFERENCE_TITLE, "update:reference:title"), /* title, e.g. "the bla forz scribs on fuggle" */
/* name */
KNOWNID(PRODUCT_SHORTLABEL, "product:shortlabel"),
KNOWNID(PRODUCT_DISTPRODUCT, "product:distproduct"),
KNOWNID(PRODUCT_DISTVERSION, "product:distversion"),
KNOWNID(PRODUCT_TYPE, "product:type"),
-KNOWNID(PRODUCT_URL, "product:url"),
+KNOWNID(PRODUCT_URL, "product:url"),
KNOWNID(PRODUCT_URL_TYPE, "product:url:type"),
KNOWNID(PRODUCT_FLAGS, "product:flags"),
KNOWNID(PRODUCT_PRODUCTLINE, "product:productline"),
@@ -163,30 +172,30 @@
KNOWNID(SUSETAGS_DATADIR, "susetags:datadir"),
/* timestamp then the repository was generated */
-KNOWNID(REPOSITORY_TIMESTAMP, "repository:timestamp"),
+KNOWNID(REPOSITORY_TIMESTAMP, "repository:timestamp"),
/* hint when the metadata could be outdated
w/respect to generated timestamp */
-KNOWNID(REPOSITORY_EXPIRE, "repository:expire"),
+KNOWNID(REPOSITORY_EXPIRE, "repository:expire"),
/* which things does this repo provides updates for, if it does */
-KNOWNID(REPOSITORY_UPDATES, "repository:updates"),
+KNOWNID(REPOSITORY_UPDATES, "repository:updates"),
/* which products this repository is supposed to be for */
-KNOWNID(REPOSITORY_PRODUCTS, "repository:products"),
+KNOWNID(REPOSITORY_PRODUCTS, "repository:products"),
/* keyword (tags) for this repository */
-KNOWNID(REPOSITORY_KEYWORDS, "repository:keywords"),
+KNOWNID(REPOSITORY_KEYWORDS, "repository:keywords"),
KNOWNID(DELTA_PACKAGE_NAME, "delta:pkgname"),
-KNOWNID(DELTA_PACKAGE_EVR, "delta:pkgevr"),
-KNOWNID(DELTA_PACKAGE_ARCH, "delta:pkgarch"),
+KNOWNID(DELTA_PACKAGE_EVR, "delta:pkgevr"),
+KNOWNID(DELTA_PACKAGE_ARCH, "delta:pkgarch"),
KNOWNID(DELTA_LOCATION_DIR, "delta:locdir"),
KNOWNID(DELTA_LOCATION_NAME, "delta:locname"),
KNOWNID(DELTA_LOCATION_EVR, "delta:locevr"),
KNOWNID(DELTA_LOCATION_SUFFIX, "delta:locsuffix"),
KNOWNID(DELTA_DOWNLOADSIZE, "delta:downloadsize"),
-KNOWNID(DELTA_CHECKSUM, "delta:checksum"),
-KNOWNID(DELTA_BASE_EVR, "delta:baseevr"),
-KNOWNID(DELTA_SEQ_NAME, "delta:seqname"),
-KNOWNID(DELTA_SEQ_EVR, "delta:seqevr"),
-KNOWNID(DELTA_SEQ_NUM, "delta:seqnum"),
+KNOWNID(DELTA_CHECKSUM, "delta:checksum"),
+KNOWNID(DELTA_BASE_EVR, "delta:baseevr"),
+KNOWNID(DELTA_SEQ_NAME, "delta:seqname"),
+KNOWNID(DELTA_SEQ_EVR, "delta:seqevr"),
+KNOWNID(DELTA_SEQ_NUM, "delta:seqnum"),
KNOWNID(ID_NUM_INTERNAL, 0)
Modified: trunk/sat-solver/src/pool.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/pool.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/pool.c (original)
+++ trunk/sat-solver/src/pool.c Thu Oct 9 14:47:05 2008
@@ -810,16 +810,26 @@
return 0;
}
+static int
+addfileprovides_setid_cb(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ Map *provideids = cbdata;
+ if (key->type != REPOKEY_TYPE_IDARRAY)
+ return 0;
+ MAPSET(provideids, kv->id);
+ return kv->eof ? SEARCH_NEXT_SOLVABLE : 0;
+}
+
static void
pool_addfileprovides_search(Pool *pool, struct addfileprovides_cbdata *cbd, struct searchfiles *sf, Repo *repoonly)
{
- Id p, start, end, *idp;
+ Id p, start, end;
Solvable *s;
Repodata *data = 0, *nextdata;
Repo *oldrepo = 0;
int dataincludes = 0;
- int i;
+ int i, j;
Map providedids;
cbd->nfiles = sf->nfiles;
@@ -842,6 +852,7 @@
{
if (!s->repo || (repoonly && s->repo != repoonly))
continue;
+ /* check if p is in (oldrepo,data) */
if (s->repo != oldrepo || (data && p >= data->end))
{
data = 0;
@@ -849,28 +860,35 @@
}
if (oldrepo == 0)
{
+ /* nope, find new repo/repodata */
+ /* if we don't find a match, set data to the next repodata */
nextdata = 0;
for (i = 0, data = s->repo->repodata; i < s->repo->nrepodata; i++, data++)
{
- if (!data->addedfileprovides || p >= data->end)
+ if (p >= data->end)
+ continue;
+ if (data->state != REPODATA_AVAILABLE)
continue;
+ for (j = 1; j < data->nkeys; j++)
+ if (data->keys[j].name == REPOSITORY_ADDEDFILEPROVIDES && data->keys[j].type == REPOKEY_TYPE_IDARRAY)
+ break;
+ if (j == data->nkeys)
+ continue;
+ /* great, this repodata contains addedfileprovides */
if (!nextdata || nextdata->start > data->start)
nextdata = data;
if (p >= data->start)
break;
}
if (i == s->repo->nrepodata)
- data = nextdata;
+ data = nextdata; /* no direct hit, use next repodata */
if (data)
{
map_init(&providedids, pool->ss.nstrings);
- for (idp = data->addedfileprovides; *idp; idp++)
- MAPSET(&providedids, *idp);
+ repodata_search(data, REPOENTRY_META, REPOSITORY_ADDEDFILEPROVIDES, addfileprovides_setid_cb, &providedids);
for (i = 0; i < cbd->nfiles; i++)
if (!MAPTST(&providedids, cbd->ids[i]))
- {
- break;
- }
+ break;
map_free(&providedids);
dataincludes = i == cbd->nfiles;
}
@@ -1314,7 +1332,7 @@
continue;
if (!MAPTST(installedmap, sp))
continue;
- change += repo_lookup_num(s, SOLVABLE_INSTALLSIZE);
+ change += solvable_lookup_num(s, SOLVABLE_INSTALLSIZE, 0);
}
if (oldinstalled)
{
@@ -1322,7 +1340,7 @@
{
if (MAPTST(installedmap, sp))
continue;
- change -= repo_lookup_num(s, SOLVABLE_INSTALLSIZE);
+ change -= solvable_lookup_num(s, SOLVABLE_INSTALLSIZE, 0);
}
}
return change;
Modified: trunk/sat-solver/src/pooltypes.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/pooltypes.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/pooltypes.h (original)
+++ trunk/sat-solver/src/pooltypes.h Thu Oct 9 14:47:05 2008
@@ -22,6 +22,7 @@
#define SOLV_VERSION_5 5
#define SOLV_VERSION_6 6
#define SOLV_VERSION_7 7
+#define SOLV_VERSION_8 8
/* The format of .solv files might change incompatibly, and that is described
by the above version number. But sometimes we also extend the emitted
Modified: trunk/sat-solver/src/repo.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repo.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repo.c (original)
+++ trunk/sat-solver/src/repo.c Thu Oct 9 14:47:05 2008
@@ -665,11 +665,14 @@
domatch_idarray(Solvable *s, Id keyname, struct matchdata *md, Id *ida)
{
KeyValue kv;
+ kv.entry = 0;
+ kv.parent = 0;
for (; *ida && !md->stop; ida++)
{
kv.id = *ida;
kv.eof = ida[1] ? 0 : 1;
repo_matchvalue(md, s, 0, solvablekeys + (keyname - SOLVABLE_NAME), &kv);
+ kv.entry++;
}
}
@@ -682,6 +685,7 @@
int i, j, flags;
Solvable *s;
+ kv.parent = 0;
md->stop = 0;
if (!p)
{
@@ -795,6 +799,8 @@
{
if (p < data->start || p >= data->end)
continue;
+ if (keyname && !repodata_precheck_keyname(data, keyname))
+ continue;
if (data->state == REPODATA_STUB)
{
if (keyname)
@@ -813,7 +819,7 @@
}
if (data->state == REPODATA_ERROR)
continue;
- repodata_search(data, p - data->start, keyname, repo_matchvalue, md);
+ repodata_search(data, p, keyname, repo_matchvalue, md);
if (md->stop > SEARCH_NEXT_KEY)
break;
}
@@ -834,33 +840,33 @@
}
const char *
-repo_lookup_str(Solvable *s, Id key)
+repo_lookup_str(Repo *repo, Id entry, Id keyname)
{
- Repo *repo = s->repo;
Pool *pool = repo->pool;
Repodata *data;
- int i, j, n;
+ int i, j;
- switch(key)
+ switch(keyname)
{
case SOLVABLE_NAME:
- return id2str(pool, s->name);
+ return id2str(pool, pool->solvables[entry].name);
case SOLVABLE_ARCH:
- return id2str(pool, s->arch);
+ return id2str(pool, pool->solvables[entry].arch);
case SOLVABLE_EVR:
- return id2str(pool, s->evr);
+ return id2str(pool, pool->solvables[entry].evr);
case SOLVABLE_VENDOR:
- return id2str(pool, s->vendor);
+ return id2str(pool, pool->solvables[entry].vendor);
}
- n = s - pool->solvables;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (n < data->start || n >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
continue;
for (j = 1; j < data->nkeys; j++)
{
- if (data->keys[j].name == key && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID || data->keys[j].type == REPOKEY_TYPE_STR))
- return repodata_lookup_str(data, n - data->start, j);
+ if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID || data->keys[j].type == REPOKEY_TYPE_STR))
+ return repodata_lookup_str(data, entry, keyname);
}
}
return 0;
@@ -868,67 +874,127 @@
unsigned int
-repo_lookup_num(Solvable *s, Id key)
+repo_lookup_num(Repo *repo, Id entry, Id keyname, unsigned int notfound)
{
- Repo *repo = s->repo;
- Pool *pool = repo->pool;
Repodata *data;
- int i, j, n;
+ int i, j;
- if (key == RPM_RPMDBID)
+ if (keyname == RPM_RPMDBID)
{
- if (repo->rpmdbid)
- return repo->rpmdbid[(s - pool->solvables) - repo->start];
- return 0;
+ if (repo->rpmdbid && entry && entry >= repo->start && entry < repo->end)
+ return repo->rpmdbid[entry - repo->start];
+ return notfound;
}
- n = s - pool->solvables;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (n < data->start || n >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
continue;
for (j = 1; j < data->nkeys; j++)
{
- if (data->keys[j].name == key
+ if (data->keys[j].name == keyname
&& (data->keys[j].type == REPOKEY_TYPE_U32
|| data->keys[j].type == REPOKEY_TYPE_NUM
|| data->keys[j].type == REPOKEY_TYPE_CONSTANT))
{
unsigned value;
- if (repodata_lookup_num(data, n - data->start, j, &value))
+ if (repodata_lookup_num(data, entry, keyname, &value))
return value;
}
}
}
- return 0;
+ return notfound;
}
+Id
+repo_lookup_id(Repo *repo, Id entry, Id keyname)
+{
+ Repodata *data;
+ int i, j;
-/*
- * generic attribute lookup
- * returns non-zero if found
- * zero if not found
- * (XXX: return value is broken atm!)
- */
+ switch(keyname)
+ {
+ case SOLVABLE_NAME:
+ return repo->pool->solvables[entry].name;
+ case SOLVABLE_ARCH:
+ return repo->pool->solvables[entry].arch;
+ case SOLVABLE_EVR:
+ return repo->pool->solvables[entry].evr;
+ case SOLVABLE_VENDOR:
+ return repo->pool->solvables[entry].vendor;
+ }
+ for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
+ {
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname && (data->keys[j].type == REPOKEY_TYPE_ID || data->keys[j].type == REPOKEY_TYPE_CONSTANTID))
+ {
+ Id id = repodata_lookup_id(data, entry, keyname);
+ if (id)
+ {
+ if (data->localpool)
+ id = repodata_globalize_id(data, id);
+ return id;
+ }
+ }
+ }
+ }
+ return 0;
+}
-int
-repo_lookup(Solvable *s, Id key, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+const unsigned char *
+repo_lookup_bin_checksum(Repo *repo, Id entry, Id keyname, Id *typep)
{
- Repo *repo = s->repo;
- Pool *pool = repo->pool;
Repodata *data;
- int i, s_id;
-
- s_id = s - pool->solvables;
+ int i, j;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
{
- if (s_id < data->start || s_id >= data->end)
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
continue;
- repodata_search(data, s_id - data->start, key, callback, cbdata);
- return 1;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname)
+ {
+ const unsigned char *chk = repodata_lookup_bin_checksum(data, entry, keyname, typep);
+ if (chk)
+ return chk;
+ }
+ }
}
+ *typep = 0;
return 0;
}
+int
+repo_lookup_void(Repo *repo, Id entry, Id keyname)
+{
+ Repodata *data;
+ int i, j;
+ for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
+ {
+ if (entry && (entry < data->start || entry >= data->end))
+ continue;
+ if (!repodata_precheck_keyname(data, keyname))
+ continue;
+ for (j = 1; j < data->nkeys; j++)
+ {
+ if (data->keys[j].name == keyname
+ && (data->keys[j].type == REPOKEY_TYPE_VOID))
+ {
+ if (repodata_lookup_void(data, entry, keyname))
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
/***********************************************************************/
@@ -944,99 +1010,49 @@
return data;
}
-static Repodata *
-repo_findrepodata(Repo *repo, Id p, Id keyname)
+Repodata *
+repo_last_repodata(Repo *repo)
{
int i;
- Repodata *data;
-
- /* FIXME: enter nice code here */
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if ((p < 0 && (-1 - p) >= data->extrastart && (-1 - p) < (data->extrastart + data->nextra))
- || (p >= 0 && p >= data->start && p < data->end))
- return data;
- if (p < 0)
- {
- data = repo->repodata;
- if (data)
- {
- for (i = 1; i < repo->nrepodata; i++)
- if (data->extrastart + data->nextra
- > repo->repodata[i].extrastart + repo->repodata[i].nextra)
- data = repo->repodata + i;
- }
- else
- data = repo_add_repodata(repo, 0);
- repodata_extend_extra(data, (-1 - p) - data->extrastart + 1);
- if (-p > repo->nextra)
- repo->nextra = -p;
- return data;
- }
- for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if (p == data->end)
- break;
- if (i < repo->nrepodata)
- {
- repodata_extend(data, p);
- return data;
- }
+ for (i = repo->nrepodata - 1; i >= 0; i--)
+ if (repo->repodata[i].state != REPODATA_STUB)
+ return repo->repodata + i;
return repo_add_repodata(repo, 0);
}
void
repo_set_id(Repo *repo, Id p, Id keyname, Id id)
{
- Repodata *data = repo_findrepodata(repo, p, keyname);
- if (p < 0)
- /* This is -1 - ((-1 - p) - data->extrastart). */
- p = p + data->extrastart;
- else
- p = p - data->start;
- repodata_set_id(data, repodata_get_handle(data, p), keyname, id);
+ Repodata *data = repo_last_repodata(repo);
+ repodata_set_id(data, p, keyname, id);
}
void
repo_set_num(Repo *repo, Id p, Id keyname, Id num)
{
- Repodata *data = repo_findrepodata(repo, p, keyname);
- if (p < 0)
- p = p + data->extrastart;
- else
- p = p - data->start;
- repodata_set_num(data, repodata_get_handle(data, p), keyname, num);
+ Repodata *data = repo_last_repodata(repo);
+ repodata_set_num(data, p, keyname, num);
}
void
repo_set_str(Repo *repo, Id p, Id keyname, const char *str)
{
- Repodata *data = repo_findrepodata(repo, p, keyname);
- if (p < 0)
- p = p + data->extrastart;
- else
- p = p - data->start;
- repodata_set_str(data, repodata_get_handle(data, p), keyname, str);
+ Repodata *data = repo_last_repodata(repo);
+ repodata_set_str(data, p, keyname, str);
}
void
repo_set_poolstr(Repo *repo, Id p, Id keyname, const char *str)
{
- Repodata *data = repo_findrepodata(repo, p, keyname);
- if (p < 0)
- p = p + data->extrastart;
- else
- p = p - data->start;
- repodata_set_poolstr(data, repodata_get_handle(data, p), keyname, str);
+ Repodata *data = repo_last_repodata(repo);
+ repodata_set_poolstr(data, p, keyname, str);
}
void
repo_add_poolstr_array(Repo *repo, Id p, Id keyname, const char *str)
{
- Repodata *data = repo_findrepodata(repo, p, keyname);
- if (p < 0)
- p = p + data->extrastart;
- else
- p = p - data->start;
- repodata_add_poolstr_array(data, repodata_get_handle(data, p), keyname, str);
+ Repodata *data = repo_last_repodata(repo);
+ repodata_add_poolstr_array(data, p, keyname, str);
}
void
@@ -1046,7 +1062,7 @@
Repodata *data;
for (i = 0, data = repo->repodata; i < repo->nrepodata; i++, data++)
- if (data->attrs || data->extraattrs)
+ if (data->attrs || data->xattrs)
repodata_internalize(data);
}
Modified: trunk/sat-solver/src/repo.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repo.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repo.h (original)
+++ trunk/sat-solver/src/repo.h Thu Oct 9 14:47:05 2008
@@ -151,7 +151,11 @@
const char *str;
int num;
int num2;
- int eof;
+
+ int entry; /* array entry, starts with 0 */
+ int eof; /* last entry reached */
+
+ struct _KeyValue *parent;
} KeyValue;
/* search flags */
@@ -160,13 +164,16 @@
#define SEARCH_SUBSTRING 2
#define SEARCH_GLOB 3
#define SEARCH_REGEX 4
+#define SEARCH_ERROR 5
#define SEARCH_NOCASE (1<<8)
#define SEARCH_NO_STORAGE_SOLVABLE (1<<9)
#define SEARCH_EXTRA (1<<10)
+#define SEARCH_SUB (1<<10)
#define SEARCH_ALL_REPOS (1<<11)
#define SEARCH_SKIP_KIND (1<<12)
+
/* By default we don't match in attributes representing filelists
because the construction of those strings is costly. Specify this
flag if you want this. In that case kv->str will contain the full
@@ -174,19 +181,28 @@
#define SEARCH_FILES (1<<13)
/* Internal */
-#define __SEARCH_ONESOLVABLE (1 << 31)
+#define SEARCH_THISENTRY (1<<31)
+
+
+/* standard flags used in the repo_add functions */
+#define REPO_REUSE_REPODATA (1 << 0)
+#define REPO_NO_INTERNALIZE (1 << 1)
Repodata *repo_add_repodata(Repo *repo, int localpool);
+Repodata *repo_last_repodata(Repo *repo);
void repo_search(Repo *repo, Id p, Id key, const char *match, int flags, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata);
/* returns the string value of the attribute, or NULL if not found */
-const char * repo_lookup_str(Solvable *s, Id key);
+const char *repo_lookup_str(Repo *repo, Id entry, Id key);
/* returns the integer value of the attribute, or 0 if not found */
-unsigned int repo_lookup_num(Solvable *s, Id key);
-/* generic attribute lookup */
-int repo_lookup(Solvable *s, Id key, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata);
+unsigned int repo_lookup_num(Repo *repo, Id entry, Id key, unsigned int notfound);
+Id repo_lookup_id(Repo *repo, Id entry, Id keyid);
+int repo_lookup_void(Repo *repo, Id entry, Id keyid);
+const unsigned char *repo_lookup_bin_checksum(Repo *repo, Id entry, Id keyid, Id *typep);
+
+#if 0
typedef struct _Dataiterator
{
Repodata *data;
@@ -208,6 +224,53 @@
int subnum;
Id subschema;
} Dataiterator;
+#else
+
+typedef struct _Datamatcher {
+ Pool *pool;
+ int flags;
+ void *match;
+ int error;
+} Datamatcher;
+
+typedef struct _Dataiterator
+{
+ int state;
+ int flags;
+
+ Pool *pool;
+ Repo *repo;
+ Repodata *data;
+
+ /* data pointers */
+ unsigned char *dp;
+ unsigned char *ddp;
+ Id *idp;
+ Id *keyp;
+
+ /* the result */
+ Repokey *key;
+ KeyValue kv;
+
+ /* our matcher */
+ Datamatcher matcher;
+
+ /* iterators/filters */
+ Id keyname;
+ Id repodataid;
+ Id entry;
+ Id repoid;
+
+ /* recursion data */
+ struct di_parent {
+ KeyValue kv;
+ unsigned char *dp;
+ Id *keyp;
+ } parents[3];
+ int nparents;
+} Dataiterator;
+
+#endif
/* Use these like:
Dataiterator di;
Modified: trunk/sat-solver/src/repo_helix.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repo_helix.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repo_helix.c (original)
+++ trunk/sat-solver/src/repo_helix.c Thu Oct 9 14:47:05 2008
@@ -396,6 +396,8 @@
pd->depth++;
/* find node name in stateswitch */
+ if (!pd->swtab[pd->state])
+ return;
for (sw = pd->swtab[pd->state]; sw->from == pd->state; sw++)
{
if (!strcmp(sw->ename, name))
@@ -437,9 +439,6 @@
case STATE_PACKAGE: /* solvable name */
pd->solvable = pool_id2solvable(pool, repo_add_solvable(pd->repo));
- if (pd->data)
- repodata_extend(pd->data, pd->solvable - pool->solvables);
-
if (!strcmp(name, "selection"))
pd->kind = "selection";
else if (!strcmp(name, "pattern"))
@@ -721,7 +720,7 @@
case STATE_BUILDTIME:
t = atoi (pd->content);
if (t)
- repodata_set_num(pd->data, repodata_get_handle(pd->data, (s - pool->solvables) - pd->repo->start), SOLVABLE_BUILDTIME, t);
+ repodata_set_num(pd->data, s - pool->solvables, SOLVABLE_BUILDTIME, t);
break;
case STATE_UPDATE: /* new version, keeping all other metadata */
evr = evr2id(pool, pd,
@@ -813,20 +812,19 @@
*/
void
-repo_add_helix(Repo *repo, FILE *fp)
+repo_add_helix(Repo *repo, FILE *fp, int flags)
{
Pool *pool = repo->pool;
Parsedata pd;
- Repodata *data = 0;
+ Repodata *data;
char buf[BUFF_SIZE];
int i, l;
struct stateswitch *sw;
- if (repo->nrepodata)
- /* use last repodata */
- data = repo->repodata + repo->nrepodata - 1;
- else
+ if (!(flags & REPO_REUSE_REPODATA))
data = repo_add_repodata(repo, 0);
+ else
+ data = repo_last_repodata(repo);
/* prepare parsedata */
memset(&pd, 0, sizeof(pd));
@@ -869,10 +867,9 @@
break;
}
XML_ParserFree(parser);
-
- if (pd.data)
- repodata_internalize(pd.data);
-
free(pd.content);
free(pd.evrspace);
+
+ if (!(flags & REPO_NO_INTERNALIZE))
+ repodata_internalize(data);
}
Modified: trunk/sat-solver/src/repo_helix.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repo_helix.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repo_helix.h (original)
+++ trunk/sat-solver/src/repo_helix.h Thu Oct 9 14:47:05 2008
@@ -21,7 +21,7 @@
#include "pool.h"
#include "repo.h"
-extern void repo_add_helix(Repo *repo, FILE *fp);
+extern void repo_add_helix(Repo *repo, FILE *fp, int flags);
#ifdef __cplusplus
}
Modified: trunk/sat-solver/src/repo_solv.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repo_solv.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repo_solv.c (original)
+++ trunk/sat-solver/src/repo_solv.c Thu Oct 9 14:47:05 2008
@@ -27,6 +27,7 @@
#include "util.h"
#include "repopack.h"
+#include "repopage.h"
#define INTERESTED_START SOLVABLE_NAME
#define INTERESTED_END SOLVABLE_ENHANCES
@@ -40,8 +41,13 @@
static Pool *mypool; /* for pool_debug... */
-/*-----------------------------------------------------------------*/
-/* .solv read functions */
+
+static void repodata_load_stub(Repodata *data);
+
+
+/*******************************************************************************
+ * functions to extract data from a file handle
+ */
/*
* read u32
@@ -132,17 +138,11 @@
}
-/*
- * read array of Ids
- */
-
-#if 0
static Id *
-read_rel_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end, Id marker)
+read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end)
{
unsigned int x = 0;
int c;
- Id old = 0;
if (data->error)
return 0;
@@ -161,30 +161,9 @@
continue;
}
x = (x << 6) | (c & 63);
- if (x == 0)
- {
- /* marker hack */
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
- return 0;
- }
- if (c != 0x40)
- {
- *store++ = 0;
- return store;
- }
- *store++ = marker; /* do not map! */
- old = 0;
- x = 0;
- continue;
- }
- x = (x - 1) + old;
- old = x;
if (max && x >= max)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: id too large (%u/%u)\n", x, max);
+ pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
data->error = SOLV_ERROR_ID_RANGE;
return 0;
}
@@ -192,7 +171,7 @@
x = map[x];
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
+ pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
return 0;
}
*store++ = x;
@@ -202,7 +181,7 @@
return store;
if (store == end)
{
- pool_debug(mypool, SAT_ERROR, "read_rel_idarray: array overflow\n");
+ pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
data->error = SOLV_ERROR_OVERFLOW;
return 0;
}
@@ -212,7 +191,15 @@
x = 0;
}
}
-#endif
+
+
+/*******************************************************************************
+ * functions to extract data from memory
+ */
+
+/*
+ * read array of Ids
+ */
static inline unsigned char *
data_read_id_max(unsigned char *dp, Id *ret, Id *map, int max, int *error)
@@ -306,308 +293,12 @@
}
-static Id *
-read_idarray(Repodata *data, Id max, Id *map, Id *store, Id *end)
-{
- unsigned int x = 0;
- int c;
-
- if (data->error)
- return 0;
- for (;;)
- {
- c = getc(data->fp);
- if (c == EOF)
- {
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
- return 0;
- }
- if ((c & 128) != 0)
- {
- x = (x << 7) ^ c ^ 128;
- continue;
- }
- x = (x << 6) | (c & 63);
- if (max && x >= max)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: id too large (%u/%u)\n", x, max);
- data->error = SOLV_ERROR_ID_RANGE;
- return 0;
- }
- if (map)
- x = map[x];
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
- return 0;
- }
- *store++ = x;
- if ((c & 64) == 0)
- {
- if (x == 0) /* already have trailing zero? */
- return store;
- if (store == end)
- {
- pool_debug(mypool, SAT_ERROR, "read_idarray: array overflow\n");
- data->error = SOLV_ERROR_OVERFLOW;
- return 0;
- }
- *store++ = 0;
- return store;
- }
- x = 0;
- }
-}
-
-static void
-read_str(Repodata *data, char **inbuf, unsigned *len)
-{
- unsigned char *buf = (unsigned char*)*inbuf;
- if (!buf)
- {
- buf = sat_malloc(1024);
- *len = 1024;
- }
- int c;
- unsigned ofs = 0;
- while((c = getc(data->fp)) != 0)
- {
- if (c == EOF)
- {
- pool_debug (mypool, SAT_ERROR, "unexpected EOF\n");
- data->error = SOLV_ERROR_EOF;
- return;
- }
- /* Plus 1 as we also want to add the 0. */
- if (ofs + 1 >= *len)
- {
- *len += 256;
- /* Don't realloc on the inbuf, it might be on the stack. */
- if (buf == (unsigned char*)*inbuf)
- {
- buf = sat_malloc(*len);
- memcpy(buf, *inbuf, *len - 256);
- }
- else
- buf = sat_realloc(buf, *len);
- }
- buf[ofs++] = c;
- }
- buf[ofs++] = 0;
- *inbuf = (char*)buf;
-}
-
-static void
-skip_item(Repodata *data, unsigned type, unsigned numid, unsigned numrel)
-{
- switch (type)
- {
- case REPOKEY_TYPE_VOID:
- case REPOKEY_TYPE_CONSTANT:
- case REPOKEY_TYPE_CONSTANTID:
- break;
- case REPOKEY_TYPE_ID:
- read_id(data, numid + numrel); /* just check Id */
- break;
- case REPOKEY_TYPE_DIR:
- read_id(data, numid + data->dirpool.ndirs); /* just check Id */
- break;
- case REPOKEY_TYPE_NUM:
- read_id(data, 0);
- break;
- case REPOKEY_TYPE_U32:
- read_u32(data);
- break;
- case REPOKEY_TYPE_STR:
- while (read_u8(data) != 0)
- ;
- break;
- case REPOKEY_TYPE_MD5:
- {
- int i;
- for (i = 0; i < SIZEOF_MD5; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_SHA1:
- {
- int i;
- for (i = 0; i < SIZEOF_SHA1; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_SHA256:
- {
- int i;
- for (i = 0; i < SIZEOF_SHA256; i++)
- read_u8(data);
- break;
- }
- case REPOKEY_TYPE_IDARRAY:
- case REPOKEY_TYPE_REL_IDARRAY:
- while ((read_u8(data) & 0xc0) != 0)
- ;
- break;
- case REPOKEY_TYPE_DIRNUMNUMARRAY:
- for (;;)
- {
- read_id(data, numid + data->dirpool.ndirs); /* just check Id */
- read_id(data, 0);
- if (!(read_id(data, 0) & 0x40))
- break;
- }
- break;
- case REPOKEY_TYPE_DIRSTRARRAY:
- for (;;)
- {
- Id id = read_id(data, 0);
- while (read_u8(data) != 0)
- ;
- if (!(id & 0x40))
- break;
- }
- break;
- default:
- pool_debug(mypool, SAT_ERROR, "unknown type %d\n", type);
- data->error = SOLV_ERROR_CORRUPT;
- break;
- }
-}
-
-static int
-key_cmp (const void *pa, const void *pb)
-{
- Repokey *a = (Repokey *)pa;
- Repokey *b = (Repokey *)pb;
- return a->name - b->name;
-}
-
-static void repodata_load_solv(Repodata *data);
-
-static void
-parse_external_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel)
-{
- Repo *repo = maindata->repo;
- Id key, id;
- Id *ida, *ide;
- Repodata *data;
- int i, n;
- repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof (*data));
- data = repo->repodata + repo->nrepodata++;
- memset(data, 0, sizeof(*data));
- data->repo = repo;
- data->pagefd = -1;
- data->state = REPODATA_STUB;
- data->loadcallback = repodata_load_solv;
- while ((key = *keyp++) != 0)
- {
- id = keys[key].name;
- switch (keys[key].type)
- {
- case REPOKEY_TYPE_IDARRAY:
- if (id != REPODATA_KEYS)
- {
- skip_item(maindata, REPOKEY_TYPE_IDARRAY, numid, numrel);
- break;
- }
- /* read_idarray writes a terminating 0, that's why the + 1 */
- ida = sat_calloc(keys[key].size + 1, sizeof(Id));
- ide = read_idarray(maindata, numid, idmap, ida, ida + keys[key].size + 1);
- n = ide - ida - 1;
- if (n & 1)
- {
- pool_debug (mypool, SAT_ERROR, "invalid attribute data\n");
- maindata->error = SOLV_ERROR_CORRUPT;
- return;
- }
- data->nkeys = 1 + (n >> 1);
- data->keys = sat_malloc2(data->nkeys, sizeof(data->keys[0]));
- memset(data->keys, 0, sizeof(Repokey));
- for (i = 1, ide = ida; i < data->nkeys; i++)
- {
- data->keys[i].name = *ide++;
- data->keys[i].type = *ide++;
- data->keys[i].size = 0;
- data->keys[i].storage = 0;
- }
- sat_free(ida);
- if (data->nkeys > 2)
- qsort(data->keys + 1, data->nkeys - 1, sizeof(data->keys[0]), key_cmp);
- break;
- case REPOKEY_TYPE_STR:
- if (id != REPODATA_LOCATION)
- skip_item(maindata, REPOKEY_TYPE_STR, numid, numrel);
- else
- {
- char buf[1024];
- unsigned len = sizeof(buf);
- char *filename = buf;
- read_str(maindata, &filename, &len);
- data->location = strdup(filename);
- if (filename != buf)
- free(filename);
- }
- break;
- default:
- skip_item(maindata, keys[key].type, numid, numrel);
- break;
- }
- }
-}
-
-static void
-parse_info_repodata(Repodata *maindata, Id *keyp, Repokey *keys, Id *idmap, unsigned numid, unsigned numrel)
-{
- Id key, id;
- Id *ida;
- while ((key = *keyp++) != 0)
- {
- id = keys[key].name;
- if (id == REPODATA_ADDEDFILEPROVIDES && keys[key].type == REPOKEY_TYPE_REL_IDARRAY)
- {
- Id old = 0;
- /* + 1 just in case */
- ida = sat_calloc(keys[key].size + 1, sizeof(Id));
- read_idarray(maindata, 0, 0, ida, ida + keys[key].size + 1);
- maindata->addedfileprovides = ida;
- for (; *ida; ida++)
- {
- old += *ida - 1;
- if (old >= numid)
- {
- *ida = 0;
- break;
- }
- *ida = idmap ? idmap[old] : old;
- }
- continue;
- }
- if (id == REPODATA_RPMDBCOOKIE && keys[key].type == REPOKEY_TYPE_SHA256)
- {
- int i;
- for (i = 0; i < 32; i++)
- maindata->repo->rpmdbcookie[i] = read_u8(maindata);
- continue;
- }
- skip_item(maindata, keys[key].type, numid, numrel);
- }
-}
-
-/*-----------------------------------------------------------------*/
-
-
-static void
-skip_schema(Repodata *data, Id *keyp, Repokey *keys, unsigned int numid, unsigned int numrel)
-{
- Id key;
- while ((key = *keyp++) != 0)
- skip_item(data, keys[key].type, numid, numrel);
-}
+/*******************************************************************************
+ * functions to add data to our incore memory space
+ */
-/*-----------------------------------------------------------------*/
static void
incore_add_id(Repodata *data, Id x)
@@ -714,12 +405,98 @@
#endif
+/*******************************************************************************
+ * callback to create our stub sub-repodatas from the incore data
+ */
+
+struct create_stub_data {
+ Repodata *data;
+ Id xkeyname;
+};
-// ----------------------------------------------
+int
+create_stub_cb(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ struct create_stub_data *stubdata = cbdata;
+ if (key->name == REPOSITORY_EXTERNAL && key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ if (stubdata->data)
+ {
+ repodata_internalize(stubdata->data);
+ if (data->start != data->end)
+ {
+ repodata_extend(stubdata->data, data->start);
+ repodata_extend(stubdata->data, data->end - 1);
+ }
+ stubdata->data = 0;
+ }
+ if (kv->eof)
+ return SEARCH_NEXT_SOLVABLE;
+ stubdata->data = repo_add_repodata(data->repo, 0);
+ stubdata->data->state = REPODATA_STUB;
+ stubdata->data->loadcallback = repodata_load_stub;
+ return 0;
+ }
+ if (!stubdata->data)
+ return SEARCH_NEXT_KEY;
+ switch(key->type)
+ {
+ case REPOKEY_TYPE_ID:
+ repodata_set_id(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ break;
+ case REPOKEY_TYPE_CONSTANTID:
+ repodata_set_constantid(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ break;
+ case REPOKEY_TYPE_STR:
+ repodata_set_str(stubdata->data, REPOENTRY_META, key->name, kv->str);
+ break;
+ case REPOKEY_TYPE_VOID:
+ repodata_set_void(stubdata->data, REPOENTRY_META, key->name);
+ break;
+ case REPOKEY_TYPE_NUM:
+ repodata_set_num(stubdata->data, REPOENTRY_META, key->name, kv->num);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ repodata_add_idarray(stubdata->data, REPOENTRY_META, key->name, kv->id);
+ if (key->name == REPOSITORY_KEYS)
+ {
+ if (!stubdata->xkeyname)
+ stubdata->xkeyname = kv->id;
+ else
+ {
+ Repokey xkey;
+
+ xkey.name = stubdata->xkeyname;
+ xkey.type = kv->id;
+ xkey.storage = KEY_STORAGE_INCORE;
+ xkey.size = 0;
+ repodata_key2id(stubdata->data, &xkey, 1);
+ stubdata->xkeyname = 0;
+ }
+ if (kv->eof)
+ stubdata->xkeyname = 0;
+ }
+ break;
+ case REPOKEY_TYPE_MD5:
+ case REPOKEY_TYPE_SHA1:
+ case REPOKEY_TYPE_SHA256:
+ repodata_set_checksum(stubdata->data, REPOENTRY_META, key->name, key->type, kv->str);
+ break;
+ default:
+ return SEARCH_NEXT_KEY;
+ }
+ return 0;
+}
+
+
+/*******************************************************************************
+ * our main function
+ */
/*
- * read repo from .solv file
- * and add it to pool
+ * read repo from .solv file and add it to pool
+ * if stubdata is set, substitute it with read data
+ * (this is used to replace a repodata stub with the real data)
*/
static int
@@ -728,7 +505,7 @@
Pool *pool = repo->pool;
int i, l;
unsigned int numid, numrel, numdir, numsolv;
- unsigned int numkeys, numschemata, numinfo, numextra, contentver;
+ unsigned int numkeys, numschemata;
Offset sizeid;
Offset *str; /* map Id -> Offset into string space */
@@ -750,12 +527,14 @@
unsigned int solvversion;
Repokey *keys;
Id *schemadata, *schemadatap, *schemadataend;
- Id *schemata, key;
+ Id *schemata, key, *keyp;
+ int nentries;
int have_xdata;
- unsigned oldnrepodata;
int maxsize, allsize;
unsigned char *buf, *dp, *dps;
int left;
+ Id stack[10];
+ int keydepth;
struct _Stringpool *spool;
@@ -776,9 +555,7 @@
solvversion = read_u32(&data);
switch (solvversion)
{
- case SOLV_VERSION_6:
- break;
- case SOLV_VERSION_7:
+ case SOLV_VERSION_8:
break;
default:
pool_debug(pool, SAT_ERROR, "unsupported SOLV version\n");
@@ -793,14 +570,6 @@
numsolv = read_u32(&data);
numkeys = read_u32(&data);
numschemata = read_u32(&data);
- numinfo = read_u32(&data);
- if (solvversion > SOLV_VERSION_6)
- {
- numextra = read_u32(&data);
- contentver = read_u32(&data);
- }
- else
- numextra = 0, contentver = 1;
solvflags = read_u32(&data);
if (numdir && numdir < 2)
@@ -813,22 +582,12 @@
{
if (numrel)
{
- pool_debug(pool, SAT_ERROR, "relations are forbidden in a store\n");
+ pool_debug(pool, SAT_ERROR, "relations are forbidden in a sub-repository\n");
return SOLV_ERROR_CORRUPT;
}
if (parent->end - parent->start != numsolv)
{
- pool_debug(pool, SAT_ERROR, "unequal number of solvables in a store\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (parent->nextra != numextra)
- {
- pool_debug(pool, SAT_ERROR, "unequal number of non-solvables in a store\n");
- return SOLV_ERROR_CORRUPT;
- }
- if (numinfo)
- {
- pool_debug(pool, SAT_ERROR, "info blocks are forbidden in a store\n");
+ pool_debug(pool, SAT_ERROR, "sub-repository solvable number doesn't match main repository (%d - %d)\n", parent->end - parent->start, numsolv);
return SOLV_ERROR_CORRUPT;
}
}
@@ -1141,7 +900,7 @@
type = idmap[type];
else if (parent)
type = str2id(pool, stringpool_id2str(spool, type), 1);
- if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_COUNTED)
+ if (type < REPOKEY_TYPE_VOID || type > REPOKEY_TYPE_FLEXARRAY)
{
pool_debug(pool, SAT_ERROR, "unsupported data type '%s'\n", id2str(pool, type));
data.error = SOLV_ERROR_UNSUPPORTED;
@@ -1175,6 +934,11 @@
data.keys = keys;
data.nkeys = numkeys;
+ for (i = 1; i < numkeys; i++)
+ {
+ id = keys[i].name;
+ data.keybits[(id >> 3) & (sizeof(data.keybits) - 1)] |= 1 << (id & 7);
+ }
/******* Part 5: Schemata ********************************************/
@@ -1200,308 +964,285 @@
data.schemadata = schemadata;
data.schemadatalen = schemadataend - data.schemadata;
+ /******* Part 6: Data ********************************************/
- /******* Part 6: Info ***********************************************/
- oldnrepodata = repo->nrepodata;
- if (numinfo)
- {
- id = read_id(&data, 0);
- id = read_id(&data, 0);
- }
- for (i = 0; i < numinfo; i++)
- {
- /* for now we're just interested in data that starts with
- * the repodata_external id
- */
- Id *keyp;
- id = read_id(&data, numschemata);
- keyp = schemadata + schemata[id];
- key = *keyp;
- if (keys[key].name == REPODATA_EXTERNAL && keys[key].type == REPOKEY_TYPE_VOID)
- {
- /* external data for some ids */
- parse_external_repodata(&data, keyp, keys, idmap, numid, numrel);
- }
- else if (keys[key].name == REPODATA_INFO)
- {
- parse_info_repodata(&data, keyp, keys, idmap, numid, numrel);
- }
- else
- {
- skip_schema(&data, keyp, keys, numid, numrel);
- }
- }
-
-
- /******* Part 7: item data *******************************************/
-
- /* calculate idarray size */
+ idarraydatap = idarraydataend = 0;
size_idarray = 0;
- for (i = 1; i < numkeys; i++)
- {
- id = keys[i].name;
- if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
- && id >= INTERESTED_START && id <= INTERESTED_END)
- size_idarray += keys[i].size;
- }
- if (numsolv || numextra)
- {
- maxsize = read_id(&data, 0);
- allsize = read_id(&data, 0);
- if (maxsize > allsize)
- {
- pool_debug(pool, SAT_ERROR, "maxsize %d is greater then allsize %d\n", maxsize, allsize);
- data.error = SOLV_ERROR_CORRUPT;
- }
- }
- else
- maxsize = allsize = 0;
+ maxsize = read_id(&data, 0);
+ allsize = read_id(&data, 0);
+ maxsize += 5; /* so we can read the next schema */
+ if (maxsize > allsize)
+ maxsize = allsize;
- /* allocate needed space in repo */
- /* we add maxsize because it is an upper limit for all idarrays */
- repo_reserve_ids(repo, 0, size_idarray + maxsize + 1);
- idarraydatap = repo->idarraydata + repo->idarraysize;
- repo->idarraysize += size_idarray;
- idarraydataend = idarraydatap + size_idarray;
- repo->lastoff = 0;
-
- /* read solvables */
- if (numsolv)
- {
- if (parent)
- s = pool_id2solvable(pool, parent->start);
- else
- s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
- /* store start and end of our id block */
- data.start = s - pool->solvables;
- data.end = data.start + numsolv;
- /* In case we have info blocks, make them refer to our part of the
- repository now. */
- for (i = oldnrepodata; i < repo->nrepodata; i++)
- {
- repo->repodata[i].start = data.start;
- repo->repodata[i].end = data.end;
- }
- }
- else
- s = 0;
+ left = 0;
+ buf = sat_calloc(maxsize + 4, 1);
+ dp = buf;
- if (numextra)
+ l = maxsize;
+ if (l > allsize)
+ l = allsize;
+ if (!l || fread(buf, l, 1, data.fp) != 1)
{
- data.extrastart = repo->nextra;
- repodata_extend_extra(&data, numextra);
- repo->nextra += numextra;
- for (i = oldnrepodata; i < repo->nrepodata; i++)
- {
- repo->repodata[i].extrastart = data.extrastart;
- repo->repodata[i].nextra = data.nextra;
- }
+ pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
+ data.error = SOLV_ERROR_EOF;
+ id = 0;
}
-
- if (have_xdata)
+ else
{
- /* reserve one byte so that all offsets are not zero */
- incore_add_id(&data, 0);
- repodata_extend_block(&data, data.start, numsolv);
+ left = l;
+ allsize -= l;
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
}
- left = 0;
- buf = sat_calloc(maxsize + 4, 1);
- dp = buf;
- for (i = 0; i < numsolv + numextra; i++, s++)
+ incore_add_id(&data, 0); /* XXX? */
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ data.mainschema = id;
+ for (i = 0; keyp[i]; i++)
+ ;
+ if (i)
+ data.mainschemaoffsets = sat_calloc(i, sizeof(Id));
+
+ nentries = 0;
+ keydepth = 0;
+ s = 0;
+ for(;;)
{
- Id *keyp;
- if (data.error)
- break;
-
- left -= (dp - buf);
- if (left < 0)
- {
- pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- if (left)
- memmove(buf, dp, left);
- l = maxsize - left;
- if (l > allsize)
- l = allsize;
- if (l && fread(buf + left, l, 1, data.fp) != 1)
- {
- pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
- data.error = SOLV_ERROR_EOF;
- break;
- }
- allsize -= l;
- left += l;
- dp = buf;
-
- dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
- if (have_xdata)
+ key = *keyp++;
+#if 0
+printf("key %d at %d\n", key, keyp - 1 - schemadata);
+#endif
+ if (!key)
{
- if (i < numsolv)
- data.incoreoffset[i] = data.incoredatalen;
- else
- data.extraoffset[i - numsolv] = data.incoredatalen;
- incore_add_id(&data, id);
- }
- if (i >= numsolv)
- s = 0;
+ if (nentries)
+ {
+ if (s && keydepth == 2)
+ {
+ s++; /* next solvable */
+ if (have_xdata)
+ data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
+ }
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ nentries--;
+ continue;
+ }
+ if (!keydepth)
+ break;
+ keyp = schemadata + stack[--keydepth];
+ nentries = stack[--keydepth];
#if 0
- if (i < numsolv)
- fprintf(stderr, "solv %d: schema %d\n", i, id);
- else
- fprintf(stderr, "extra %d: schema %d\n", i - numsolv, id);
+printf("pop flexarray %d %d\n", keydepth, nentries);
#endif
- keyp = schemadata + schemata[id];
- while ((key = *keyp++) != 0)
+ if (!keydepth && s)
+ s = 0; /* back from solvables */
+ continue;
+ }
+
+ if (keydepth <= 2)
{
+ if (keydepth == 0)
+ data.mainschemaoffsets[keyp - 1 - schemadata + schemata[data.mainschema]] = data.incoredatalen;
+ /* read data chunk to dp */
if (data.error)
break;
+ left -= (dp - buf);
+ if (left < 0)
+ {
+ pool_debug(mypool, SAT_ERROR, "buffer overrun\n");
+ data.error = SOLV_ERROR_EOF;
+ break;
+ }
+ if (left)
+ memmove(buf, dp, left);
+ l = maxsize - left;
+ if (l > allsize)
+ l = allsize;
+ if (l && fread(buf + left, l, 1, data.fp) != 1)
+ {
+ pool_debug(mypool, SAT_ERROR, "unexpected EOF\n");
+ data.error = SOLV_ERROR_EOF;
+ break;
+ }
+ allsize -= l;
+ left += l;
+ dp = buf;
+ }
- id = keys[key].name;
#if 0
- if (i < numsolv)
- fprintf(stderr, "solv %d name %d type %d class %d\n", i, id, keys[key].type, keys[key].storage);
- else
- fprintf(stderr, "extra %d name %d type %d class %d\n", i - numsolv, id, keys[key].type, keys[key].storage);
+printf("=> %s %s %p\n", id2str(pool, keys[key].name), id2str(pool, keys[key].type), s);
#endif
- if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ id = keys[key].name;
+ if (keys[key].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dps = dp;
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ incore_add_blob(&data, dps, dp - dps);
+ continue;
+ }
+ switch (keys[key].type)
+ {
+ case REPOKEY_TYPE_ID:
+ dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error);
+ if (s && id == SOLVABLE_NAME)
+ s->name = did;
+ else if (s && id == SOLVABLE_ARCH)
+ s->arch = did;
+ else if (s && id == SOLVABLE_EVR)
+ s->evr = did;
+ else if (s && id == SOLVABLE_VENDOR)
+ s->vendor = did;
+ else if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_id(&data, did);
+#if 0
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did));
+#endif
+ break;
+ case REPOKEY_TYPE_U32:
+ dp = data_read_u32(dp, &h);
+#if 0
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h);
+#endif
+ if (s && id == RPM_RPMDBID)
+ {
+ if (!repo->rpmdbid)
+ repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
+ repo->rpmdbid[(s - pool->solvables) - repo->start] = h;
+ }
+ else if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_u32(&data, h);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ case REPOKEY_TYPE_REL_IDARRAY:
+ if (!s || id < INTERESTED_START || id > INTERESTED_END)
{
- /* copy offset/length into incore */
dps = dp;
- dp = data_skip(dp, REPOKEY_TYPE_ID);
- dp = data_skip(dp, REPOKEY_TYPE_ID);
- incore_add_blob(&data, dps, dp - dps);
- continue;
+ dp = data_skip(dp, REPOKEY_TYPE_IDARRAY);
+ if (keys[key].storage != KEY_STORAGE_INCORE)
+ break;
+ if (idmap)
+ incore_map_idarray(&data, dps, idmap, numid);
+ else
+ incore_add_blob(&data, dps, dp - dps);
+ break;
}
- switch (keys[key].type)
+ ido = idarraydatap - repo->idarraydata;
+ if (keys[key].type == REPOKEY_TYPE_IDARRAY)
+ dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error);
+ else if (id == SOLVABLE_REQUIRES)
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER);
+ else if (id == SOLVABLE_PROVIDES)
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER);
+ else
+ dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0);
+ if (idarraydatap > idarraydataend)
{
- case REPOKEY_TYPE_ID:
- dp = data_read_id_max(dp, &did, idmap, numid + numrel, &data.error);
- if (id == SOLVABLE_NAME)
- s->name = did;
- else if (id == SOLVABLE_ARCH)
- s->arch = did;
- else if (id == SOLVABLE_EVR)
- s->evr = did;
- else if (id == SOLVABLE_VENDOR)
- s->vendor = did;
- else if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_id(&data, did);
-#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %s\n", id2str(pool, id), id2str(pool, did));
-#endif
+ pool_debug(pool, SAT_ERROR, "idarray overflow\n");
+ data.error = SOLV_ERROR_OVERFLOW;
break;
- case REPOKEY_TYPE_U32:
- dp = data_read_u32(dp, &h);
+ }
+ if (id == SOLVABLE_PROVIDES)
+ s->provides = ido;
+ else if (id == SOLVABLE_OBSOLETES)
+ s->obsoletes = ido;
+ else if (id == SOLVABLE_CONFLICTS)
+ s->conflicts = ido;
+ else if (id == SOLVABLE_REQUIRES)
+ s->requires = ido;
+ else if (id == SOLVABLE_RECOMMENDS)
+ s->recommends= ido;
+ else if (id == SOLVABLE_SUPPLEMENTS)
+ s->supplements = ido;
+ else if (id == SOLVABLE_SUGGESTS)
+ s->suggests = ido;
+ else if (id == SOLVABLE_ENHANCES)
+ s->enhances = ido;
#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s -> %u\n", id2str(pool, id), h);
+ POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id));
+ for (; repo->idarraydata[ido]; ido++)
+ POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido]));
#endif
- if (id == RPM_RPMDBID)
- {
- if (!repo->rpmdbid)
- repo->rpmdbid = repo_sidedata_create(repo, sizeof(Id));
- repo->rpmdbid[i] = h;
- }
- else if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_u32(&data, h);
+ break;
+ case REPOKEY_TYPE_FLEXARRAY:
+ if (keydepth == sizeof(stack)/sizeof(*stack))
+ {
+ pool_debug(pool, SAT_ERROR, "flexarray stack overflow\n");
+ data.error = SOLV_ERROR_CORRUPT;
+ break;
+ }
+ stack[keydepth++] = nentries;
+ stack[keydepth++] = keyp - schemadata;
+ dp = data_read_id(dp, &nentries);
+ incore_add_id(&data, nentries);
+ if (!nentries)
+ {
+ /* zero size array? */
+ keydepth--;
+ nentries = stack[--keydepth];
break;
- case REPOKEY_TYPE_IDARRAY:
- case REPOKEY_TYPE_REL_IDARRAY:
- if (id < INTERESTED_START || id > INTERESTED_END)
+ }
+ if (keydepth == 2 && id == REPOSITORY_SOLVABLES)
+ {
+ /* horray! here come the solvables */
+ if (nentries != numsolv)
{
- dps = dp;
- dp = data_skip(dp, REPOKEY_TYPE_IDARRAY);
- if (keys[key].storage != KEY_STORAGE_INCORE)
- break;
- if (idmap)
- incore_map_idarray(&data, dps, idmap, numid);
- else
- incore_add_blob(&data, dps, dp - dps);
+ pool_debug(pool, SAT_ERROR, "inconsistent number of solvables: %d %d\n", nentries, numsolv);
+ data.error = SOLV_ERROR_CORRUPT;
break;
}
- ido = idarraydatap - repo->idarraydata;
- if (keys[key].type == REPOKEY_TYPE_IDARRAY)
- dp = data_read_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error);
- else if (id == SOLVABLE_REQUIRES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_PREREQMARKER);
- else if (id == SOLVABLE_PROVIDES)
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, SOLVABLE_FILEMARKER);
- else
- dp = data_read_rel_idarray(dp, &idarraydatap, idmap, numid + numrel, &data.error, 0);
- if (idarraydatap > idarraydataend)
+ if (idarraydatap)
{
- pool_debug(pool, SAT_ERROR, "idarray overflow\n");
- data.error = SOLV_ERROR_OVERFLOW;
+ pool_debug(pool, SAT_ERROR, "more than one solvable block\n");
+ data.error = SOLV_ERROR_CORRUPT;
break;
}
- if (id == SOLVABLE_PROVIDES)
- s->provides = ido;
- else if (id == SOLVABLE_OBSOLETES)
- s->obsoletes = ido;
- else if (id == SOLVABLE_CONFLICTS)
- s->conflicts = ido;
- else if (id == SOLVABLE_REQUIRES)
- s->requires = ido;
- else if (id == SOLVABLE_RECOMMENDS)
- s->recommends= ido;
- else if (id == SOLVABLE_SUPPLEMENTS)
- s->supplements = ido;
- else if (id == SOLVABLE_SUGGESTS)
- s->suggests = ido;
- else if (id == SOLVABLE_ENHANCES)
- s->enhances = ido;
-#if 0
- POOL_DEBUG(SAT_DEBUG_STATS, "%s ->\n", id2str(pool, id));
- for (; repo->idarraydata[ido]; ido++)
- POOL_DEBUG(SAT_DEBUG_STATS," %s\n", dep2str(pool, repo->idarraydata[ido]));
-#endif
- break;
- case REPOKEY_TYPE_COUNTED:
+ if (parent)
+ s = pool_id2solvable(pool, parent->start);
+ else
+ s = pool_id2solvable(pool, repo_add_solvable_block(repo, numsolv));
+ data.start = s - pool->solvables;
+ data.end = data.start + numsolv;
+ repodata_extend_block(&data, data.start, numsolv);
+ for (i = 1; i < numkeys; i++)
{
- Id num, did;
- dp = data_read_id(dp, &num);
- incore_add_id(&data, num);
- dp = data_read_id_max(dp, &did, 0, numschemata, &data.error);
- incore_add_id(&data, did);
- while (num--)
- {
- Id *kp = schemadata + schemata[did];
- for (; *kp; kp++)
- {
- Id tid;
- switch (keys[*kp].type)
- {
- case REPOKEY_TYPE_ID:
- dp = data_read_id_max(dp, &tid, idmap, numid + numrel, &data.error);
- incore_add_id(&data, tid);
- break;
- default:
- dps = dp;
- //dp = data_skip(dp, keys[*kp].type);
- dp = data_skip_recursive(&data, dp, keys + *kp);
- incore_add_blob(&data, dps, dp - dps);
- break;
- }
- }
- }
+ id = keys[i].name;
+ if ((keys[i].type == REPOKEY_TYPE_IDARRAY || keys[i].type == REPOKEY_TYPE_REL_IDARRAY)
+ && id >= INTERESTED_START && id <= INTERESTED_END)
+ size_idarray += keys[i].size;
}
- break;
- default:
- dps = dp;
- //dp = data_skip(dp, keys[key].type);
- dp = data_skip_recursive(&data, dp, keys + key);
- if (keys[key].storage == KEY_STORAGE_INCORE)
- incore_add_blob(&data, dps, dp - dps);
- break;
+ /* allocate needed space in repo */
+ /* we add maxsize because it is an upper limit for all idarrays, thus we can't overflow */
+ repo_reserve_ids(repo, 0, size_idarray + maxsize + 1);
+ idarraydatap = repo->idarraydata + repo->idarraysize;
+ repo->idarraysize += size_idarray;
+ idarraydataend = idarraydatap + size_idarray;
+ repo->lastoff = 0;
+ if (have_xdata)
+ data.incoreoffset[(s - pool->solvables) - data.start] = data.incoredatalen;
}
+ nentries--;
+ dp = data_read_id_max(dp, &id, 0, numschemata, &data.error);
+ incore_add_id(&data, id);
+ keyp = schemadata + schemata[id];
+ break;
+ default:
+ dps = dp;
+ dp = data_skip(dp, keys[key].type);
+ if (keys[key].storage == KEY_STORAGE_INCORE)
+ incore_add_blob(&data, dps, dp - dps);
+ break;
}
}
-
/* should shrink idarraydata again */
+ if (keydepth)
+ {
+ pool_debug(pool, SAT_ERROR, "unexpected EOF, depth = %d\n", keydepth);
+ data.error = SOLV_ERROR_CORRUPT;
+ }
if (!data.error)
{
left -= (dp - buf);
@@ -1556,35 +1297,40 @@
/* no longer needed */
data.fp = 0;
}
+ sat_free(idmap);
+ mypool = 0;
- if (parent && !data.error)
+ if (data.error)
{
- /* we're a store */
- sat_free(parent->schemata);
- sat_free(parent->schemadata);
- sat_free(parent->keys);
- sat_free(parent->location);
+ /* XXX: free repodata? */
+ return data.error;
+ }
+
+ if (parent)
+ {
+ /* overwrite stub repodata */
+ repodata_free(parent);
*parent = data;
}
- else if ((data.incoredatalen || data.fp) && !data.error)
+ else
{
- /* we got some data, make it available */
+ /* make it available as new repodata */
repo->repodata = sat_realloc2(repo->repodata, repo->nrepodata + 1, sizeof(data));
repo->repodata[repo->nrepodata++] = data;
}
- else
+
+ /* create stub repodata entries for all external */
+ for (key = 1 ; key < data.nkeys; key++)
+ if (data.keys[key].name == REPOSITORY_EXTERNAL && data.keys[key].type == REPOKEY_TYPE_FLEXARRAY)
+ break;
+ if (key < data.nkeys)
{
- /* discard data */
- sat_free(data.dirpool.dirs);
- sat_free(data.incoreoffset);
- sat_free(schemata);
- sat_free(schemadata);
- sat_free(keys);
+ struct create_stub_data stubdata;
+ /* got some */
+ memset(&stubdata, 0, sizeof(stubdata));
+ repodata_search(&data, REPOENTRY_META, REPOSITORY_EXTERNAL, create_stub_cb, &stubdata);
}
-
- sat_free(idmap);
- mypool = 0;
- return data.error;
+ return 0;
}
int
@@ -1594,7 +1340,7 @@
}
static void
-repodata_load_solv(Repodata *data)
+repodata_load_stub(Repodata *data)
{
FILE *fp;
Pool *pool = data->repo->pool;
@@ -1603,6 +1349,8 @@
data->state = REPODATA_ERROR;
return;
}
+ /* so that we can retrieve meta data */
+ data->state = REPODATA_AVAILABLE;
fp = pool->loadcallback(pool, data, pool->loadcallbackdata);
if (!fp)
{
Modified: trunk/sat-solver/src/repodata.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repodata.c?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repodata.c (original)
+++ trunk/sat-solver/src/repodata.c Thu Oct 9 14:47:05 2008
@@ -27,6 +27,7 @@
#include "util.h"
#include "repopack.h"
+#include "repopage.h"
extern unsigned int compress_buf (const unsigned char *in, unsigned int in_len,
unsigned char *out, unsigned int out_len);
@@ -52,32 +53,26 @@
data->schemadata = sat_calloc(1, sizeof(Id));
data->nschemata = 1;
data->schemadatalen = 1;
- data->start = repo->start;
- data->end = repo->end;
- data->nextra = repo->nextra;
- data->extrastart = 0;
- data->incoreoffset = sat_extend_resize(0, data->end - data->start, sizeof(Id), REPODATA_BLOCK);
- data->extraoffset = sat_extend_resize(0, repo->nextra, sizeof(Id), REPODATA_BLOCK);
data->pagefd = -1;
}
void
repodata_free(Repodata *data)
{
+ int i;
+
sat_free(data->keys);
+
sat_free(data->schemata);
sat_free(data->schemadata);
+ sat_free(data->schematahash);
- sat_free(data->spool.strings);
- sat_free(data->spool.stringspace);
- sat_free(data->spool.stringhashtbl);
-
- sat_free(data->dirpool.dirs);
- sat_free(data->dirpool.dirtraverse);
+ stringpool_free(&data->spool);
+ dirpool_free(&data->dirpool);
+ sat_free(data->mainschemaoffsets);
sat_free(data->incoredata);
sat_free(data->incoreoffset);
- sat_free(data->extraoffset);
sat_free(data->verticaloffset);
sat_free(data->blob_store);
@@ -86,223 +81,285 @@
sat_free(data->vincore);
+ if (data->attrs)
+ for (i = 0; i < data->end - data->start; i++)
+ sat_free(data->attrs[i]);
sat_free(data->attrs);
- sat_free(data->extraattrs);
+ if (data->xattrs)
+ for (i = 0; i < data->nxattrs; i++)
+ sat_free(data->xattrs[i]);
+ sat_free(data->xattrs);
+
sat_free(data->attrdata);
sat_free(data->attriddata);
- sat_free(data->location);
- sat_free(data->addedfileprovides);
-
if (data->pagefd != -1)
close(data->pagefd);
}
-unsigned char *
-data_skip_recursive(Repodata *data, unsigned char *dp, Repokey *key)
+
+/***************************************************************
+ * key pool management
+ */
+
+/* this is not so time critical that we need a hash, so we do a simple
+ * linear search */
+Id
+repodata_key2id(Repodata *data, Repokey *key, int create)
{
- KeyValue kv;
- if (key->type != REPOKEY_TYPE_COUNTED)
- return data_skip(dp, key->type);
- dp = data_fetch(dp, &kv, key);
- int num = kv.num;
- int schema = kv.id;
- while (num--)
- {
- Id *keyp = data->schemadata + data->schemata[schema];
- for (; *keyp; keyp++)
- dp = data_skip_recursive(data, dp, data->keys + *keyp);
+ Id keyid;
+
+ for (keyid = 1; keyid < data->nkeys; keyid++)
+ if (data->keys[keyid].name == key->name && data->keys[keyid].type == key->type)
+ {
+ if ((key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID) && key->size != data->keys[keyid].size)
+ continue;
+ break;
+ }
+ if (keyid == data->nkeys)
+ {
+ if (!create)
+ return 0;
+ /* allocate new key */
+ data->keys = sat_realloc2(data->keys, data->nkeys + 1, sizeof(Repokey));
+ data->keys[data->nkeys++] = *key;
+ if (data->verticaloffset)
+ {
+ data->verticaloffset = sat_realloc2(data->verticaloffset, data->nkeys, sizeof(Id));
+ data->verticaloffset[data->nkeys - 1] = 0;
+ }
+ data->keybits[(key->name >> 3) & (sizeof(data->keybits) - 1)] |= 1 << (key->name & 7);
}
- return dp;
+ return keyid;
}
-static unsigned char *
-forward_to_key(Repodata *data, Id keyid, Id schemaid, unsigned char *dp)
+
+/***************************************************************
+ * schema pool management
+ */
+
+#define SCHEMATA_BLOCK 31
+#define SCHEMATADATA_BLOCK 255
+
+Id
+repodata_schema2id(Repodata *data, Id *schema, int create)
{
- Id k, *keyp;
+ int h, len, i;
+ Id *sp, cid;
+ Id *schematahash;
- keyp = data->schemadata + data->schemata[schemaid];
- while ((k = *keyp++) != 0)
+ if ((schematahash = data->schematahash) == 0)
{
- if (k == keyid)
- return dp;
- if (data->keys[k].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ data->schematahash = schematahash = sat_calloc(256, sizeof(Id));
+ for (i = 0; i < data->nschemata; i++)
{
- dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip that offset */
- dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip that length */
- continue;
+ for (sp = data->schemadata + data->schemata[i], h = 0; *sp; len++)
+ h = h * 7 + *sp++;
+ h &= 255;
+ schematahash[h] = i + 1;
}
- if (data->keys[k].storage != KEY_STORAGE_INCORE)
- continue;
- dp = data_skip_recursive(data, dp, data->keys + k);
+ data->schemadata = sat_extend_resize(data->schemadata, data->schemadatalen, sizeof(Id), SCHEMATADATA_BLOCK);
+ data->schemata = sat_extend_resize(data->schemata, data->nschemata, sizeof(Id), SCHEMATA_BLOCK);
}
- return 0;
-}
-#define BLOB_PAGEBITS 15
-#define BLOB_PAGESIZE (1 << BLOB_PAGEBITS)
+ for (sp = schema, len = 0, h = 0; *sp; len++)
+ h = h * 7 + *sp++;
+ h &= 255;
+ len++;
-static unsigned char *
-load_page_range(Repodata *data, unsigned int pstart, unsigned int pend)
+ cid = schematahash[h];
+ if (cid)
+ {
+ cid--;
+ if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
+ return cid;
+ /* cache conflict */
+ for (cid = 0; cid < data->nschemata; cid++)
+ if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
+ return cid;
+ }
+ /* a new one */
+ if (!create)
+ return 0;
+ data->schemadata = sat_extend(data->schemadata, data->schemadatalen, len, sizeof(Id), SCHEMATADATA_BLOCK);
+ data->schemata = sat_extend(data->schemata, data->nschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
+ /* add schema */
+ memcpy(data->schemadata + data->schemadatalen, schema, len * sizeof(Id));
+ data->schemata[data->nschemata] = data->schemadatalen;
+ data->schemadatalen += len;
+ schematahash[h] = data->nschemata + 1;
+#if 0
+fprintf(stderr, "schema2id: new schema\n");
+#endif
+ return data->nschemata++;
+}
+
+void
+repodata_free_schemahash(Repodata *data)
{
-/* Make sure all pages from PSTART to PEND (inclusive) are loaded,
- and are consecutive. Return a pointer to the mapping of PSTART. */
- unsigned char buf[BLOB_PAGESIZE];
- unsigned int i;
-
- /* Quick check in case all pages are there already and consecutive. */
- for (i = pstart; i <= pend; i++)
- if (data->pages[i].mapped_at == -1
- || (i > pstart
- && data->pages[i].mapped_at
- != data->pages[i-1].mapped_at + BLOB_PAGESIZE))
- break;
- if (i > pend)
- return data->blob_store + data->pages[pstart].mapped_at;
+ data->schematahash = sat_free(data->schematahash);
+ /* shrink arrays */
+ data->schemata = sat_realloc2(data->schemata, data->nschemata, sizeof(Id));
+ data->schemadata = sat_realloc2(data->schemadata, data->schemadatalen, sizeof(Id));
+}
- if (data->pagefd == -1)
- return 0;
- /* Ensure that we can map the numbers of pages we need at all. */
- if (pend - pstart + 1 > data->ncanmap)
- {
- unsigned int oldcan = data->ncanmap;
- data->ncanmap = pend - pstart + 1;
- if (data->ncanmap < 4)
- data->ncanmap = 4;
- data->mapped = sat_realloc2(data->mapped, data->ncanmap, sizeof(data->mapped[0]));
- memset (data->mapped + oldcan, 0, (data->ncanmap - oldcan) * sizeof (data->mapped[0]));
- data->blob_store = sat_realloc2(data->blob_store, data->ncanmap, BLOB_PAGESIZE);
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGE: can map %d pages\n", data->ncanmap);
-#endif
- }
+/***************************************************************
+ * dir pool management
+ */
- /* Now search for "cheap" space in our store. Space is cheap if it's either
- free (very cheap) or contains pages we search for anyway. */
+Id
+repodata_str2dir(Repodata *data, const char *dir, int create)
+{
+ Id id, parent;
+ const char *dire;
- /* Setup cost array. */
- unsigned int cost[data->ncanmap];
- for (i = 0; i < data->ncanmap; i++)
- {
- unsigned int pnum = data->mapped[i];
- if (pnum == 0)
- cost[i] = 0;
+ parent = 0;
+ while (*dir == '/' && dir[1] == '/')
+ dir++;
+ if (*dir == '/' && !dir[1])
+ return 1;
+ while (*dir)
+ {
+ dire = strchrnul(dir, '/');
+ if (data->localpool)
+ id = stringpool_strn2id(&data->spool, dir, dire - dir, create);
else
- {
- pnum--;
- Attrblobpage *p = data->pages + pnum;
- assert (p->mapped_at != -1);
- if (pnum >= pstart && pnum <= pend)
- cost[i] = 1;
- else
- cost[i] = 3;
- }
+ id = strn2id(data->repo->pool, dir, dire - dir, create);
+ if (!id)
+ return 0;
+ parent = dirpool_add_dir(&data->dirpool, parent, id, create);
+ if (!parent)
+ return 0;
+ if (!*dire)
+ break;
+ dir = dire + 1;
+ while (*dir == '/')
+ dir++;
}
+ return parent;
+}
- /* And search for cheapest space. */
- unsigned int best_cost = -1;
- unsigned int best = 0;
- unsigned int same_cost = 0;
- for (i = 0; i + pend - pstart < data->ncanmap; i++)
- {
- unsigned int c = cost[i];
- unsigned int j;
- for (j = 0; j < pend - pstart + 1; j++)
- c += cost[i+j];
- if (c < best_cost)
- best_cost = c, best = i;
- else if (c == best_cost)
- same_cost++;
- /* A null cost won't become better. */
- if (c == 0)
- break;
+const char *
+repodata_dir2str(Repodata *data, Id did, const char *suf)
+{
+ Pool *pool = data->repo->pool;
+ int l = 0;
+ Id parent, comp;
+ const char *comps;
+ char *p;
+
+ if (!did)
+ return suf ? suf : "";
+ parent = did;
+ while (parent)
+ {
+ comp = dirpool_compid(&data->dirpool, parent);
+ comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
+ l += strlen(comps);
+ parent = dirpool_parent(&data->dirpool, parent);
+ if (parent)
+ l++;
}
- /* If all places have the same cost we would thrash on slot 0. Avoid
- this by doing a round-robin strategy in this case. */
- if (same_cost == data->ncanmap - pend + pstart - 1)
- best = data->rr_counter++ % (data->ncanmap - pend + pstart);
-
- /* So we want to map our pages from [best] to [best+pend-pstart].
- Use a very simple strategy, which doesn't make the best use of
- our resources, but works. Throw away all pages in that range
- (even ours) then copy around ours (in case they were outside the
- range) or read them in. */
- for (i = best; i < best + pend - pstart + 1; i++)
- {
- unsigned int pnum = data->mapped[i];
- if (pnum--
- /* If this page is exactly at the right place already,
- no need to evict it. */
- && pnum != pstart + i - best)
- {
- /* Evict this page. */
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGE: evict page %d from %d\n", pnum, i);
-#endif
- cost[i] = 0;
- data->mapped[i] = 0;
- data->pages[pnum].mapped_at = -1;
- }
+ if (suf)
+ l += strlen(suf) + 1;
+ p = pool_alloctmpspace(pool, l + 1) + l;
+ *p = 0;
+ if (suf)
+ {
+ p -= strlen(suf);
+ strcpy(p, suf);
+ *--p = '/';
+ }
+ parent = did;
+ while (parent)
+ {
+ comp = dirpool_compid(&data->dirpool, parent);
+ comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
+ l = strlen(comps);
+ p -= l;
+ strncpy(p, comps, l);
+ parent = dirpool_parent(&data->dirpool, parent);
+ if (parent)
+ *--p = '/';
}
+ return p;
+}
+
+
+/***************************************************************
+ * data management
+ */
+
+static inline unsigned char *
+data_skip_schema(Repodata *data, unsigned char *dp, Id schema)
+{
+ Id *keyp = data->schemadata + data->schemata[schema];
+ for (; *keyp; keyp++)
+ dp = data_skip_key(data, dp, data->keys + *keyp);
+ return dp;
+}
- /* Everything is free now. Read in the pages we want. */
- for (i = pstart; i <= pend; i++)
+unsigned char *
+data_skip_key(Repodata *data, unsigned char *dp, Repokey *key)
+{
+ int nentries, schema;
+ switch(key->type)
{
- Attrblobpage *p = data->pages + i;
- unsigned int pnum = i - pstart + best;
- void *dest = data->blob_store + pnum * BLOB_PAGESIZE;
- if (p->mapped_at != -1)
- {
- if (p->mapped_at != pnum * BLOB_PAGESIZE)
- {
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGECOPY: %d to %d\n", i, pnum);
-#endif
- /* Still mapped somewhere else, so just copy it from there. */
- memcpy (dest, data->blob_store + p->mapped_at, BLOB_PAGESIZE);
- data->mapped[p->mapped_at / BLOB_PAGESIZE] = 0;
- }
+ case REPOKEY_TYPE_FIXARRAY:
+ dp = data_read_id(dp, &nentries);
+ if (!nentries)
+ return dp;
+ dp = data_read_id(dp, &schema);
+ while (nentries--)
+ dp = data_skip_schema(data, dp, schema);
+ return dp;
+ case REPOKEY_TYPE_FLEXARRAY:
+ dp = data_read_id(dp, &nentries);
+ while (nentries--)
+ {
+ dp = data_read_id(dp, &schema);
+ dp = data_skip_schema(data, dp, schema);
}
- else
- {
- unsigned int in_len = p->file_size;
- unsigned int compressed = in_len & 1;
- in_len >>= 1;
-#ifdef DEBUG_PAGING
- fprintf (stderr, "PAGEIN: %d to %d", i, pnum);
-#endif
- if (pread(data->pagefd, compressed ? buf : dest, in_len, p->file_offset) != in_len)
- {
- perror ("mapping pread");
- return 0;
- }
- if (compressed)
- {
- unsigned int out_len;
- out_len = unchecked_decompress_buf(buf, in_len,
- dest, BLOB_PAGESIZE);
- if (out_len != BLOB_PAGESIZE && i < data->num_pages - 1)
- {
- fprintf(stderr, "can't decompress\n");
- return 0;
- }
-#ifdef DEBUG_PAGING
- fprintf (stderr, " (expand %d to %d)", in_len, out_len);
-#endif
- }
-#ifdef DEBUG_PAGING
- fprintf (stderr, "\n");
-#endif
+ return dp;
+ default:
+ if (key->storage == KEY_STORAGE_INCORE)
+ dp = data_skip(dp, key->type);
+ else if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ dp = data_skip(dp, REPOKEY_TYPE_ID);
+ }
+ return dp;
+ }
+}
+
+static unsigned char *
+forward_to_key(Repodata *data, Id keyid, Id *keyp, unsigned char *dp)
+{
+ Id k;
+
+ if (!keyid)
+ return 0;
+ while ((k = *keyp++) != 0)
+ {
+ if (k == keyid)
+ return dp;
+ if (data->keys[k].storage == KEY_STORAGE_VERTICAL_OFFSET)
+ {
+ dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip offset */
+ dp = data_skip(dp, REPOKEY_TYPE_ID); /* skip length */
+ continue;
}
- p->mapped_at = pnum * BLOB_PAGESIZE;
- data->mapped[pnum] = i + 1;
+ if (data->keys[k].storage != KEY_STORAGE_INCORE)
+ continue;
+ dp = data_skip_key(data, dp, data->keys + k);
}
- return data->blob_store + best * BLOB_PAGESIZE;
+ return 0;
}
static unsigned char *
-make_vertical_available(Repodata *data, Repokey *key, Id off, Id len)
+get_vertical_data(Repodata *data, Repokey *key, Id off, Id len)
{
unsigned char *dp;
if (!len)
@@ -319,7 +376,7 @@
/* we now have the offset, go into vertical */
off += data->verticaloffset[key - data->keys];
/* fprintf(stderr, "key %d page %d\n", key->name, off / BLOB_PAGESIZE); */
- dp = load_page_range(data, off / BLOB_PAGESIZE, (off + len - 1) / BLOB_PAGESIZE);
+ dp = repodata_load_page_range(data, off / BLOB_PAGESIZE, (off + len - 1) / BLOB_PAGESIZE);
if (dp)
dp += off % BLOB_PAGESIZE;
return dp;
@@ -335,7 +392,7 @@
if (key->storage == KEY_STORAGE_INCORE)
{
/* hmm, this is a bit expensive */
- *dpp = data_skip_recursive(data, dp, key);
+ *dpp = data_skip_key(data, dp, key);
return dp;
}
else if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
@@ -344,280 +401,840 @@
dp = data_read_id(dp, &off);
dp = data_read_id(dp, &len);
*dpp = dp;
- return make_vertical_available(data, key, off, len);
+ return get_vertical_data(data, key, off, len);
+ }
+ return 0;
+}
+
+static int
+load_repodata(Repodata *data)
+{
+ if (data->loadcallback)
+ {
+ data->loadcallback(data);
+ if (data->state == REPODATA_AVAILABLE)
+ return 1;
}
+ data->state = REPODATA_ERROR;
return 0;
}
static inline int
-maybe_load_repodata(Repodata *data, Id *keyid)
+maybe_load_repodata(Repodata *data, Id keyname)
{
- if (data->state == REPODATA_STUB)
+ if (keyname && !repodata_precheck_keyname(data, keyname))
+ return 0; /* do not bother... */
+ switch(data->state)
{
- if (data->loadcallback)
+ case REPODATA_STUB:
+ if (keyname)
{
- if (keyid)
- {
- /* key order may change when loading */
- int i;
- Id name = data->keys[*keyid].name;
- Id type = data->keys[*keyid].type;
- data->loadcallback(data);
- if (data->state == REPODATA_AVAILABLE)
- {
- for (i = 1; i < data->nkeys; i++)
- if (data->keys[i].name == name && data->keys[i].type == type)
- break;
- if (i < data->nkeys)
- *keyid = i;
- else
- return 0;
- }
+ int i;
+ for (i = 0; i < data->nkeys; i++)
+ if (keyname == data->keys[i].name)
+ break;
+ if (i == data->nkeys)
+ return 0;
+ }
+ return load_repodata(data);
+ case REPODATA_ERROR:
+ return 0;
+ case REPODATA_AVAILABLE:
+ return 1;
+ default:
+ data->state = REPODATA_ERROR;
+ return 0;
+ }
+}
+
+static inline unsigned char*
+entry2data(Repodata *data, Id entry, Id *schemap)
+{
+ unsigned char *dp = data->incoredata;
+ if (!dp)
+ return 0;
+ if (entry == REPOENTRY_META) /* META */
+ dp += 1;
+ else if (entry == REPOENTRY_POS) /* META */
+ {
+ *schemap = data->pos.schema;
+ return data->incoredata + data->pos.dp;
+ }
+ else
+ {
+ if (entry < data->start || entry >= data->end)
+ return 0;
+ dp += data->incoreoffset[entry - data->start];
+ }
+ return data_read_id(dp, schemap);
+}
+
+/************************************************************************
+ * data lookup
+ */
+
+static inline Id
+find_schema_key(Repodata *data, Id schema, Id keyname)
+{
+ Id *keyp;
+ for (keyp = data->schemadata + data->schemata[schema]; *keyp; keyp++)
+ if (data->keys[*keyp].name == keyname)
+ return *keyp;
+ return 0;
+}
+
+static inline unsigned char *
+find_key_data(Repodata *data, Id entry, Id keyname, Repokey **keyp)
+{
+ unsigned char *dp, *ddp;
+ Id keyid, schema;
+ Repokey *key;
+
+ if (!maybe_load_repodata(data, keyname))
+ return 0;
+ dp = entry2data(data, entry, &schema);
+ if (!dp)
+ return 0;
+ keyid = find_schema_key(data, schema, keyname);
+ if (!keyid)
+ return 0;
+ key = data->keys + keyid;
+ *keyp = key;
+ if (key->type == REPOKEY_TYPE_VOID || key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID)
+ return dp;
+ dp = forward_to_key(data, keyid, data->schemadata + data->schemata[schema], dp);
+ if (!dp)
+ return 0;
+ ddp = get_data(data, key, &dp);
+ return ddp;
+}
+
+
+Id
+repodata_lookup_id(Repodata *data, Id entry, Id keyname)
+{
+ unsigned char *dp;
+ Repokey *key;
+ Id id;
+
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ if (key->type == REPOKEY_TYPE_CONSTANTID)
+ return key->size;
+ if (key->type != REPOKEY_TYPE_ID)
+ return 0;
+ dp = data_read_id(dp, &id);
+ return id;
+}
+
+const char *
+repodata_lookup_str(Repodata *data, Id entry, Id keyname)
+{
+ unsigned char *dp;
+ Repokey *key;
+ Id id;
+
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ if (key->type == REPOKEY_TYPE_STR)
+ return (const char *)dp;
+ if (key->type == REPOKEY_TYPE_CONSTANTID)
+ return id2str(data->repo->pool, key->size);
+ if (key->type == REPOKEY_TYPE_ID)
+ dp = data_read_id(dp, &id);
+ else
+ return 0;
+ if (data->localpool)
+ return data->spool.stringspace + data->spool.strings[id];
+ return id2str(data->repo->pool, id);
+}
+
+int
+repodata_lookup_num(Repodata *data, Id entry, Id keyname, unsigned int *value)
+{
+ unsigned char *dp;
+ Repokey *key;
+ KeyValue kv;
+
+ *value = 0;
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ if (key->type == REPOKEY_TYPE_NUM
+ || key->type == REPOKEY_TYPE_U32
+ || key->type == REPOKEY_TYPE_CONSTANT)
+ {
+ dp = data_fetch(dp, &kv, key);
+ *value = kv.num;
+ return 1;
+ }
+ return 0;
+}
+
+int
+repodata_lookup_void(Repodata *data, Id entry, Id keyname)
+{
+ Id schema;
+ Id *keyp;
+ unsigned char *dp;
+
+ if (!maybe_load_repodata(data, keyname))
+ return 0;
+ dp = entry2data(data, entry, &schema);
+ if (!dp)
+ return 0;
+ /* can't use find_schema_key as we need to test the type */
+ for (keyp = data->schemadata + data->schemata[schema]; *keyp; keyp++)
+ if (data->keys[*keyp].name == keyname && data->keys[*keyp].type == REPOKEY_TYPE_VOID)
+ return 1;
+ return 0;
+}
+
+const unsigned char *
+repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyname, Id *typep)
+{
+ unsigned char *dp;
+ Repokey *key;
+
+ dp = find_key_data(data, entry, keyname, &key);
+ if (!dp)
+ return 0;
+ *typep = key->type;
+ return dp;
+}
+
+
+/************************************************************************
+ * data search
+ */
+
+struct subschema_data {
+ Solvable *s;
+ void *cbdata;
+ KeyValue *parent;
+};
+
+/* search in a specific entry */
+void
+repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+{
+ Id schema;
+ Repokey *key;
+ Id k, keyid, *kp, *keyp;
+ unsigned char *dp, *ddp;
+ int onekey = 0;
+ int stop;
+ KeyValue kv;
+ Solvable *s;
+
+ if (!maybe_load_repodata(data, keyname))
+ return;
+ if (entry == REPOENTRY_SUBSCHEMA)
+ {
+ struct subschema_data *subd = cbdata;
+ cbdata = subd->cbdata;
+ s = subd->s;
+ schema = subd->parent->id;
+ dp = (unsigned char *)subd->parent->str;
+ kv.parent = subd->parent;
+ }
+ else
+ {
+ schema = 0;
+ dp = entry2data(data, entry, &schema);
+ if (!dp)
+ return;
+ s = data->repo->pool->solvables + entry;
+ kv.parent = 0;
+ }
+ keyp = data->schemadata + data->schemata[schema];
+ if (keyname)
+ {
+ /* search for a specific key */
+ for (kp = keyp; (k = *kp++) != 0; )
+ if (data->keys[k].name == keyname)
+ break;
+ if (k == 0)
+ return;
+ dp = forward_to_key(data, k, data->schemadata + data->schemata[schema], dp);
+ if (!dp)
+ return;
+ keyp = kp - 1;
+ onekey = 1;
+ }
+ while ((keyid = *keyp++) != 0)
+ {
+ stop = 0;
+ key = data->keys + keyid;
+ ddp = get_data(data, key, &dp);
+
+ if (key->type == REPOKEY_TYPE_FLEXARRAY || key->type == REPOKEY_TYPE_FIXARRAY)
+ {
+ struct subschema_data subd;
+ int nentries;
+ Id schema = 0;
+
+ subd.cbdata = cbdata;
+ subd.s = s;
+ subd.parent = &kv;
+ ddp = data_read_id(ddp, &nentries);
+ kv.num = nentries;
+ kv.entry = 0;
+ while (ddp && nentries > 0)
+ {
+ if (key->type == REPOKEY_TYPE_FLEXARRAY || !kv.entry)
+ ddp = data_read_id(ddp, &schema);
+ kv.id = schema;
+ kv.str = (char *)ddp;
+ stop = callback(cbdata, s, data, key, &kv);
+ if (stop > SEARCH_NEXT_KEY)
+ return;
+ if (stop)
+ break;
+ if (!keyname)
+ repodata_search(data, REPOENTRY_SUBSCHEMA, 0, callback, &subd);
+ ddp = data_skip_schema(data, ddp, schema);
+ nentries--;
+ kv.entry++;
+ }
+ if (!nentries)
+ {
+ /* sentinel */
+ kv.eof = 1;
+ kv.str = (char *)ddp;
+ stop = callback(cbdata, s, data, key, &kv);
+ if (stop > SEARCH_NEXT_KEY)
+ return;
+ }
+ if (onekey)
+ return;
+ continue;
+ }
+ kv.entry = 0;
+ do
+ {
+ ddp = data_fetch(ddp, &kv, key);
+ if (!ddp)
+ break;
+ stop = callback(cbdata, s, data, key, &kv);
+ kv.entry++;
+ }
+ while (!kv.eof && !stop);
+ if (onekey || stop > SEARCH_NEXT_KEY)
+ return;
+ }
+}
+
+void
+repodata_set_pos_kv(Repodata *data, KeyValue *kv)
+{
+ if (!kv)
+ {
+ data->pos.dp = 0;
+ data->pos.schema = 0;
+ }
+ else
+ {
+ data->pos.dp = (unsigned char *)kv->str - data->incoredata;
+ data->pos.schema = kv->id;
+ }
+}
+
+/************************************************************************/
+
+static Repokey solvablekeys[RPM_RPMDBID - SOLVABLE_NAME + 1] = {
+ { SOLVABLE_NAME, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_ARCH, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_EVR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_VENDOR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_PROVIDES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_OBSOLETES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_CONFLICTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_REQUIRES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_RECOMMENDS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_SUGGESTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_SUPPLEMENTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { SOLVABLE_ENHANCES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
+ { RPM_RPMDBID, REPOKEY_TYPE_U32, 0, KEY_STORAGE_SOLVABLE },
+};
+
+#if 1
+static inline Id *
+solvabledata_fetch(Solvable *s, KeyValue *kv, Id keyname)
+{
+ kv->id = keyname;
+ switch (keyname)
+ {
+ case SOLVABLE_NAME:
+ kv->eof = 1;
+ return &s->name;
+ case SOLVABLE_ARCH:
+ kv->eof = 1;
+ return &s->arch;
+ case SOLVABLE_EVR:
+ kv->eof = 1;
+ return &s->evr;
+ case SOLVABLE_VENDOR:
+ kv->eof = 1;
+ return &s->vendor;
+ case SOLVABLE_PROVIDES:
+ kv->eof = 0;
+ return s->provides ? s->repo->idarraydata + s->provides : 0;
+ case SOLVABLE_OBSOLETES:
+ kv->eof = 0;
+ return s->obsoletes ? s->repo->idarraydata + s->obsoletes : 0;
+ case SOLVABLE_CONFLICTS:
+ kv->eof = 0;
+ return s->conflicts ? s->repo->idarraydata + s->conflicts : 0;
+ case SOLVABLE_REQUIRES:
+ kv->eof = 0;
+ return s->requires ? s->repo->idarraydata + s->requires : 0;
+ case SOLVABLE_RECOMMENDS:
+ kv->eof = 0;
+ return s->recommends ? s->repo->idarraydata + s->recommends : 0;
+ case SOLVABLE_SUPPLEMENTS:
+ kv->eof = 0;
+ return s->supplements ? s->repo->idarraydata + s->supplements : 0;
+ case SOLVABLE_SUGGESTS:
+ kv->eof = 0;
+ return s->suggests ? s->repo->idarraydata + s->suggests : 0;
+ case SOLVABLE_ENHANCES:
+ kv->eof = 0;
+ return s->enhances ? s->repo->idarraydata + s->enhances : 0;
+ case RPM_RPMDBID:
+ kv->eof = 1;
+ return s->repo->rpmdbid ? s->repo->rpmdbid + (s - s->repo->pool->solvables - s->repo->start) : 0;
+ default:
+ return 0;
+ }
+}
+
+void
+datamatcher_init(Datamatcher *ma, Pool *pool, const char *match, int flags)
+{
+ ma->pool = pool;
+ ma->match = (void *)match;
+ ma->flags = flags;
+ ma->error = 0;
+ if ((flags & SEARCH_STRINGMASK) == SEARCH_REGEX)
+ {
+ ma->match = sat_calloc(1, sizeof(regex_t));
+ ma->error = regcomp((regex_t *)ma->match, match, REG_EXTENDED | REG_NOSUB | REG_NEWLINE | ((flags & SEARCH_NOCASE) ? REG_ICASE : 0));
+ if (ma->error)
+ {
+ sat_free(ma->match);
+ ma->match = (void *)match;
+ ma->flags = (flags & ~SEARCH_STRINGMASK) | SEARCH_ERROR;
+ }
+ }
+}
+
+void
+datamatcher_free(Datamatcher *ma)
+{
+ if ((ma->flags & SEARCH_STRINGMASK) == SEARCH_REGEX && ma->match)
+ {
+ regfree(ma->match);
+ ma->match = sat_free(ma->match);
+ }
+}
+
+int
+datamatcher_match(Datamatcher *ma, Repodata *data, Repokey *key, KeyValue *kv)
+{
+ switch (key->type)
+ {
+ case REPOKEY_TYPE_ID:
+ case REPOKEY_TYPE_IDARRAY:
+ if (data && data->localpool)
+ kv->str = stringpool_id2str(&data->spool, kv->id);
+ else
+ kv->str = id2str(ma->pool, kv->id);
+ break;
+ case REPOKEY_TYPE_STR:
+ break;
+ case REPOKEY_TYPE_DIRSTRARRAY:
+ if (!(ma->flags & SEARCH_FILES))
+ return 0;
+ /* Put the full filename into kv->str. */
+ kv->str = repodata_dir2str(data, kv->id, kv->str);
+ /* And to compensate for that put the "empty" directory into
+ kv->id, so that later calls to repodata_dir2str on this data
+ come up with the same filename again. */
+ kv->id = 0;
+ break;
+ default:
+ return 0;
+ }
+ /* Maybe skip the kind specifier. Do this only for SOLVABLE attributes,
+ for the others we can't know if a colon separates a kind or not. */
+ if ((ma->flags & SEARCH_SKIP_KIND) != 0 && key->storage == KEY_STORAGE_SOLVABLE)
+ {
+ const char *s = strchr(kv->str, ':');
+ if (s)
+ kv->str = s + 1;
+ }
+ switch ((ma->flags & SEARCH_STRINGMASK))
+ {
+ case SEARCH_SUBSTRING:
+ if (ma->flags & SEARCH_NOCASE)
+ {
+ if (!strcasestr(kv->str, (const char *)ma->match))
+ return 0;
+ }
+ else
+ {
+ if (!strstr(kv->str, (const char *)ma->match))
+ return 0;
+ }
+ break;
+ case SEARCH_STRING:
+ if (ma->flags & SEARCH_NOCASE)
+ {
+ if (strcasecmp((const char *)ma->match, kv->str))
+ return 0;
+ }
+ else
+ {
+ if (strcmp((const char *)ma->match, kv->str))
+ return 0;
+ }
+ break;
+ case SEARCH_GLOB:
+ if (fnmatch((const char *)ma->match, kv->str, (ma->flags & SEARCH_NOCASE) ? FNM_CASEFOLD : 0))
+ return 0;
+ break;
+ case SEARCH_REGEX:
+ if (regexec((const regex_t *)ma->match, kv->str, 0, NULL, 0))
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+enum {
+ di_bye,
+
+ di_nextattr,
+ di_nextkey,
+ di_nextrepodata,
+ di_nextsolvable,
+ di_nextrepo,
+
+ di_enterrepo,
+ di_entersolvable,
+ di_enterrepodata,
+ di_enterkey,
+
+ di_nextarrayelement,
+ di_entersub,
+ di_leavesub,
+
+ di_nextsolvableattr,
+ di_nextsolvablekey,
+ di_entersolvablekey
+};
+
+void
+dataiterator_init(Dataiterator *di, Repo *repo, Id p, Id keyname, const char *match, int flags)
+{
+ memset(di, 0, sizeof(*di));
+ di->repo = repo;
+ di->keyname = keyname;
+ di->entry = p;
+ di->pool = repo->pool;
+ if (p)
+ flags |= SEARCH_THISENTRY;
+ di->flags = flags;
+ if (repo)
+ di->repoid = -1;
+ if (match)
+ datamatcher_init(&di->matcher, di->pool, match, flags);
+ di->state = di_enterrepo;
+}
+
+void
+dataiterator_free(Dataiterator *di)
+{
+ if (di->matcher.match)
+ datamatcher_free(&di->matcher);
+}
+
+int
+dataiterator_step(Dataiterator *di)
+{
+ Id schema;
+
+ for (;;)
+ {
+ switch (di->state)
+ {
+ case di_nextattr: di_nextattr:
+ di->kv.entry++;
+ di->ddp = data_fetch(di->ddp, &di->kv, di->key);
+ if (di->kv.eof)
+ di->state = di_nextkey;
+ else
+ di->state = di_nextattr;
+ break;
+
+ case di_nextkey: di_nextkey:
+ if (!di->keyname)
+ {
+ if (*++di->keyp)
+ goto di_enterkey;
+ }
+ else if ((di->flags & SEARCH_SUB) != 0)
+ {
+ Id *keyp = di->keyp;
+ for (keyp++; *keyp; keyp++)
+ if (di->data->keys[*keyp].name == di->keyname ||
+ di->data->keys[*keyp].type == REPOKEY_TYPE_FIXARRAY ||
+ di->data->keys[*keyp].type == REPOKEY_TYPE_FLEXARRAY)
+ break;
+ if (*keyp && (di->dp = forward_to_key(di->data, *keyp, di->keyp, di->dp)) != 0)
+ {
+ di->keyp = keyp;
+ goto di_enterkey;
+ }
+ }
+
+ if (di->kv.parent)
+ goto di_leavesub;
+ /* FALLTHROUGH */
+
+ case di_nextrepodata: di_nextrepodata:
+ if (di->repodataid >= 0 && ++di->repodataid < di->repo->nrepodata)
+ goto di_enterrepodata;
+ /* FALLTHROUGH */
+
+ case di_nextsolvable:
+ if (!(di->flags & SEARCH_THISENTRY))
+ {
+ if (di->entry < 0)
+ di->entry = di->repo->start;
+ else
+ di->entry++;
+ for (; di->entry < di->repo->end; di->entry++)
+ {
+ if (di->pool->solvables[di->entry].repo == di->repo)
+ goto di_entersolvable;
+ }
+ }
+ /* FALLTHROUGH */
+
+ case di_nextrepo:
+ if (di->repoid >= 0)
+ {
+ di->repoid++;
+ if (di->repoid < di->pool->nrepos)
+ {
+ di->repo = di->pool->repos[di->repoid];
+ goto di_enterrepo;
+ }
+ }
+
+ /* FALLTHROUGH */
+ case di_bye:
+ di->state = di_bye;
+ return 0;
+
+ case di_enterrepo: di_enterrepo:
+ if (!(di->flags & SEARCH_THISENTRY))
+ di->entry = di->repo->start;
+ /* FALLTHROUGH */
+
+ case di_entersolvable: di_entersolvable:
+ if (di->repodataid >= 0)
+ {
+ di->repodataid = 0;
+ if (di->entry > 0 && (!di->keyname || (di->keyname >= SOLVABLE_NAME && di->keyname <= RPM_RPMDBID)))
+ {
+ di->key = solvablekeys + (di->keyname ? di->keyname - SOLVABLE_NAME : 0);
+ di->data = 0;
+ goto di_entersolvablekey;
+ }
+ }
+
+ case di_enterrepodata: di_enterrepodata:
+ if (di->repodataid >= 0)
+ di->data = di->repo->repodata + di->repodataid;
+ if (!maybe_load_repodata(di->data, di->keyname))
+ goto di_nextrepodata;
+ di->dp = entry2data(di->data, di->entry, &schema);
+ if (!di->dp)
+ goto di_nextrepodata;
+ di->keyp = di->data->schemadata + di->data->schemata[schema];
+ if (di->keyname)
+ {
+ Id *keyp;
+ if ((di->flags & SEARCH_SUB) != 0)
+ {
+ di->keyp--;
+ goto di_nextkey;
+ }
+ for (keyp = di->keyp; *keyp; keyp++)
+ if (di->data->keys[*keyp].name == di->keyname)
+ break;
+ if (!*keyp)
+ goto di_nextrepodata;
+ di->dp = forward_to_key(di->data, *keyp, di->keyp, di->dp);
+ di->keyp = keyp;
+ if (!di->dp)
+ goto di_nextrepodata;
+ }
+
+ case di_enterkey: di_enterkey:
+ di->kv.entry = -1;
+ di->key = di->data->keys + *di->keyp;
+ di->ddp = get_data(di->data, di->key, &di->dp);
+ if (!di->ddp)
+ goto di_nextkey;
+ if (di->key->type == REPOKEY_TYPE_FIXARRAY || di->key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ di->ddp = data_read_id(di->ddp, &di->kv.num);
+ di->kv.entry = -1;
+ di->kv.eof = 0;
+ goto di_nextarrayelement;
+ }
+ goto di_nextattr;
+
+ case di_nextarrayelement: di_nextarrayelement:
+ di->kv.entry++;
+ if (di->kv.entry)
+ di->ddp = data_skip_schema(di->data, di->ddp, di->kv.id);
+ if (di->kv.entry == di->kv.num)
+ {
+ if (di->keyname && di->key->name != di->keyname)
+ goto di_nextkey;
+ di->kv.str = (char *)di->ddp;
+ di->kv.eof = 1;
+ di->state = di_nextkey;
+ break;
}
+ if (di->key->type == REPOKEY_TYPE_FLEXARRAY || !di->kv.entry)
+ di->ddp = data_read_id(di->ddp, &di->kv.id);
+ di->kv.str = (char *)di->ddp;
+ if (di->keyname && di->key->name != di->keyname)
+ goto di_entersub;
+ if ((di->flags & SEARCH_SUB) != 0)
+ di->state = di_entersub;
else
- data->loadcallback(data);
- }
- else
- data->state = REPODATA_ERROR;
- }
- if (data->state == REPODATA_AVAILABLE)
- return 1;
- data->state = REPODATA_ERROR;
- return 0;
-}
+ di->state = di_nextarrayelement;
+ break;
-static inline unsigned char*
-entry2data(Repodata *data, Id entry)
-{
- if (entry < 0)
- return data->incoredata + data->extraoffset[-1 - entry];
- else
- return data->incoredata + data->incoreoffset[entry];
-}
+ case di_entersub: di_entersub:
+ if (di->nparents == sizeof(di->parents)/sizeof(*di->parents) - 1)
+ goto di_nextarrayelement; /* sorry, full */
+ di->parents[di->nparents].kv = di->kv;
+ di->parents[di->nparents].dp = di->dp;
+ di->parents[di->nparents].keyp = di->keyp;
+ di->dp = (unsigned char *)di->kv.str;
+ di->keyp = di->data->schemadata + di->data->schemata[di->kv.id];
+ memset(&di->kv, 0, sizeof(di->kv));
+ di->kv.parent = &di->parents[di->nparents].kv;
+ di->nparents++;
+ di->keyp--;
+ goto di_nextkey;
+
+ case di_leavesub: di_leavesub:
+ di->nparents--;
+ di->dp = di->parents[di->nparents].dp;
+ di->kv = di->parents[di->nparents].kv;
+ di->keyp = di->parents[di->nparents].keyp;
+ di->key = di->data->keys + *di->keyp;
+ di->ddp = (unsigned char *)di->kv.str;
+ goto di_nextarrayelement;
+
+ /* special solvable attr handling follows */
+
+ case di_nextsolvableattr:
+ di->kv.id = *di->idp++;
+ di->kv.entry++;
+ if (!*di->idp)
+ {
+ di->kv.eof = 1;
+ di->state = di_nextsolvablekey;
+ }
+ break;
-Id
-repodata_lookup_id(Repodata *data, Id entry, Id keyid)
-{
- Id schema;
- Repokey *key;
- Id id, *keyp;
- unsigned char *dp;
+ case di_nextsolvablekey: di_nextsolvablekey:
+ if (di->keyname || di->key->name == RPM_RPMDBID)
+ goto di_enterrepodata;
+ di->key++;
+ /* FALLTHROUGH */
+
+ case di_entersolvablekey: di_entersolvablekey:
+ di->idp = solvabledata_fetch(di->pool->solvables + di->entry, &di->kv, di->key->name);
+ if (!di->idp || !di->idp[0])
+ goto di_nextsolvablekey;
+ di->kv.id = di->idp[0];
+ di->kv.num = di->idp[0];
+ if (!di->kv.eof && !di->idp[1])
+ di->kv.eof = 1;
+ di->kv.entry = 0;
+ if (di->kv.eof)
+ di->state = di_nextsolvablekey;
+ else
+ di->state = di_nextsolvableattr;
+ break;
+ }
- if (!maybe_load_repodata(data, &keyid))
- return 0;
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- dp = get_data(data, key, &dp);
- if (!dp)
- return 0;
- if (key->type == REPOKEY_TYPE_CONSTANTID)
- return key->size;
- if (key->type != REPOKEY_TYPE_ID)
- return 0;
- dp = data_read_id(dp, &id);
- return id;
+ if (di->matcher.match)
+ if (!datamatcher_match(&di->matcher, di->data, di->key, &di->kv))
+ continue;
+ /* found something! */
+ return 1;
+ }
}
-const char *
-repodata_lookup_str(Repodata *data, Id entry, Id keyid)
+void
+dataiterator_skip_attribute(Dataiterator *di)
{
- Id schema;
- Repokey *key;
- Id id, *keyp;
- unsigned char *dp;
-
- if (!maybe_load_repodata(data, &keyid))
- return 0;
-
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- dp = get_data(data, key, &dp);
- if (!dp)
- return 0;
- if (key->type == REPOKEY_TYPE_STR)
- return (const char *)dp;
- if (key->type == REPOKEY_TYPE_CONSTANTID)
- return id2str(data->repo->pool, key->size);
- if (key->type == REPOKEY_TYPE_ID)
- dp = data_read_id(dp, &id);
+ if (di->state == di_nextsolvableattr)
+ di->state = di_nextsolvablekey;
else
- return 0;
- if (data->localpool)
- return data->spool.stringspace + data->spool.strings[id];
- return id2str(data->repo->pool, id);
+ di->state = di_nextkey;
}
-int
-repodata_lookup_num(Repodata *data, Id entry, Id keyid, unsigned int *value)
+void
+dataiterator_skip_solvable(Dataiterator *di)
{
- Id schema;
- Repokey *key;
- Id *keyp;
- KeyValue kv;
- unsigned char *dp;
-
- *value = 0;
-
- if (!maybe_load_repodata(data, &keyid))
- return 0;
-
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- /* make sure the schema of this solvable contains the key */
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- dp = get_data(data, key, &dp);
- if (!dp)
- return 0;
- if (key->type == REPOKEY_TYPE_NUM
- || key->type == REPOKEY_TYPE_U32
- || key->type == REPOKEY_TYPE_CONSTANT)
- {
- dp = data_fetch(dp, &kv, key);
- *value = kv.num;
- return 1;
- }
- return 0;
+ di->state = di_nextsolvable;
}
-int
-repodata_lookup_void(Repodata *data, Id entry, Id keyid)
+void
+dataiterator_skip_repo(Dataiterator *di)
{
- Id schema;
- Id *keyp;
- unsigned char *dp;
- if (!maybe_load_repodata(data, &keyid))
- return 0;
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- return 1;
+ di->state = di_nextrepo;
}
-const unsigned char *
-repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyid, Id *typep)
+void
+dataiterator_jump_to_solvable(Dataiterator *di, Solvable *s)
{
- Id schema;
- Id *keyp;
- Repokey *key;
- unsigned char *dp;
-
- if (!maybe_load_repodata(data, &keyid))
- return 0;
- dp = entry2data(data, entry);
- if (!dp)
- return 0;
- dp = data_read_id(dp, &schema);
- for (keyp = data->schemadata + data->schemata[schema]; *keyp != keyid; keyp++)
- if (!*keyp)
- return 0;
- dp = forward_to_key(data, keyid, schema, dp);
- key = data->keys + keyid;
- *typep = key->type;
- return get_data(data, key, &dp);
+ di->repo = s->repo;
+ di->repoid = -1;
+ di->entry = s - di->pool->solvables;
+ di->state = di_entersolvable;
}
void
-repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, Repokey *key, KeyValue *kv), void *cbdata)
+dataiterator_jump_to_repo(Dataiterator *di, Repo *repo)
{
- Id schema;
- Repokey *key;
- Id k, keyid, *kp, *keyp;
- unsigned char *dp, *ddp;
- int onekey = 0;
- int stop;
- KeyValue kv;
+ di->repo = repo;
+ di->repoid = -1;
+ di->state = di_enterrepo;
+}
- if (entry < 0
- || !maybe_load_repodata(data, 0))
- return;
+#else
- dp = entry2data(data, entry);
- if (!dp)
- return;
- dp = data_read_id(dp, &schema);
- keyp = data->schemadata + data->schemata[schema];
- if (keyname)
- {
- /* search in a specific key */
- for (kp = keyp; (k = *kp++) != 0; )
- if (data->keys[k].name == keyname)
- break;
- if (k == 0)
- return;
- dp = forward_to_key(data, k, schema, dp);
- if (!dp)
- return;
- keyp = kp - 1;
- onekey = 1;
- }
- while ((keyid = *keyp++) != 0)
- {
- stop = 0;
- key = data->keys + keyid;
- ddp = get_data(data, key, &dp);
- do
- {
- ddp = data_fetch(ddp, &kv, key);
- if (!ddp)
- break;
- if (key->type == REPOKEY_TYPE_COUNTED)
- {
- int num = kv.num;
- int subschema = kv.id;
- Repokey *countkey = key;
- kv.eof = 0;
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
- while (num--)
- {
- Id *kp = data->schemadata + data->schemata[subschema];
- for (; *kp; kp++)
- {
- key = data->keys + *kp;
- ddp = data_fetch(ddp, &kv, key);
- if (!ddp)
- exit(1);
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, key, &kv);
- }
- kv.eof = 1;
- callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
- }
- kv.eof = 2;
- stop = callback(cbdata, data->repo->pool->solvables + data->start + entry, data, countkey, &kv);
- }
- else
- stop = callback(cbdata, data->repo->pool->solvables + data->start + entry, data, key, &kv);
- }
- while (!kv.eof && !stop);
- if (onekey || stop > SEARCH_NEXT_KEY)
- return;
- }
-}
+/************************************************************************
+ * data search iterator
+ */
static void
dataiterator_newdata(Dataiterator *di)
@@ -652,8 +1269,6 @@
return;
if (di->solvid >= 0)
dp += data->incoreoffset[di->solvid - data->start];
- else
- dp += data->extraoffset[-1 - di->solvid - data->extrastart];
dp = data_read_id(dp, &schema);
Id *keyp = data->schemadata + data->schemata[schema];
if (keyname)
@@ -665,7 +1280,7 @@
break;
if (k == 0)
return;
- dp = forward_to_key(data, k, schema, dp);
+ dp = forward_to_key(data, k, keyp, dp);
if (!dp)
return;
keyp = kp - 1;
@@ -689,7 +1304,7 @@
const char *match, int flags)
{
di->flags = flags;
- if (p)
+ if (p > 0)
{
di->solvid = p;
di->flags |= __SEARCH_ONESOLVABLE;
@@ -845,22 +1460,6 @@
return dataiterator_match_int_real(di, flags, vmatch);
}
-static Repokey solvablekeys[RPM_RPMDBID - SOLVABLE_NAME + 1] = {
- { SOLVABLE_NAME, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_ARCH, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_EVR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_VENDOR, REPOKEY_TYPE_ID, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_PROVIDES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_OBSOLETES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_CONFLICTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_REQUIRES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_RECOMMENDS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_SUGGESTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_SUPPLEMENTS, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { SOLVABLE_ENHANCES, REPOKEY_TYPE_IDARRAY, 0, KEY_STORAGE_SOLVABLE },
- { RPM_RPMDBID, REPOKEY_TYPE_U32, 0, KEY_STORAGE_SOLVABLE },
-};
-
int
dataiterator_step(Dataiterator *di)
{
@@ -869,8 +1468,10 @@
{
if (di->state)
{
+ /* we're stepping through solvable data, 1 -> SOLVABLE_NAME... */
if (di->idp)
{
+ /* we're stepping through an id array */
Id *idp = di->idp;
if (*idp)
{
@@ -983,7 +1584,14 @@
/* Send end-of-element. See above for keyp[-1]. */
di->kv.eof = 1;
di->key = di->data->keys + di->keyp[-1];
- di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ if (di->subschema)
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ else
+ {
+ di->dp = data_read_id(di->dp, &di->subschema);
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ di->subschema = 0;
+ }
di->subnum--;
}
else
@@ -1023,15 +1631,11 @@
{
if (!(di->flags & SEARCH_EXTRA))
goto skiprepo;
- di->solvid = -1;
- if (di->solvid < -repo->nextra)
- goto skiprepo;
+ goto skiprepo;
}
}
else
{
- --di->solvid;
- if (di->solvid < -repo->nextra)
{
skiprepo:;
Pool *pool = di->repo->pool;
@@ -1048,16 +1652,14 @@
}
}
di->data = repo->repodata - 1;
- if (di->solvid < 0
- || (di->flags & SEARCH_NO_STORAGE_SOLVABLE))
+ if ((di->flags & SEARCH_NO_STORAGE_SOLVABLE))
continue;
static Id zeroid = 0;
di->keyp = &zeroid;
di->state = 1;
goto restart;
}
- if ((di->solvid < 0 && (-1 - di->solvid) >= data->extrastart && (-1 - di->solvid) < (data->extrastart + data->nextra))
- || (di->solvid >= 0 && di->solvid >= data->start && di->solvid < data->end))
+ if ((di->solvid >= 0 && di->solvid >= data->start && di->solvid < data->end))
{
dataiterator_newdata(di);
if (di->nextkeydp)
@@ -1072,13 +1674,21 @@
}
di->dp = data_fetch(di->dp, &di->kv, di->key);
}
- if (di->key->type == REPOKEY_TYPE_COUNTED)
+ if (di->key->type == REPOKEY_TYPE_FIXARRAY)
{
di->subnum = di->kv.num;
di->subschema = di->kv.id;
di->kv.eof = 0;
di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
}
+ if (di->key->type == REPOKEY_TYPE_FLEXARRAY)
+ {
+ di->subnum = di->kv.num;
+ di->kv.eof = 0;
+ di->dp = data_read_id(di->dp, &di->subschema);
+ di->subkeyp = di->data->schemadata + di->data->schemata[di->subschema];
+ di->subschema = 0;
+ }
}
weg2:
if (!di->match
@@ -1117,7 +1727,7 @@
{
dataiterator_skip_solvable(di);
/* We're done with all solvables and all extra things for this repo. */
- di->solvid = -1 - di->repo->nextra;
+ di->solvid = -1;
}
void
@@ -1138,6 +1748,12 @@
di->solvid = repo->start - 1;
}
+#endif
+
+/************************************************************************
+ * data modify functions
+ */
+
/* extend repodata so that it includes solvables p */
void
repodata_extend(Repodata *data, Id p)
@@ -1175,21 +1791,6 @@
}
void
-repodata_extend_extra(Repodata *data, int nextra)
-{
- if (nextra <= data->nextra)
- return;
- if (data->extraattrs)
- {
- data->extraattrs = sat_extend(data->extraattrs, data->nextra, nextra - data->nextra, sizeof(Id), REPODATA_BLOCK);
- memset(data->extraattrs + data->nextra, 0, (nextra - data->nextra) * sizeof (Id));
- }
- data->extraoffset = sat_extend(data->extraoffset, data->nextra, nextra - data->nextra, sizeof(Id), REPODATA_BLOCK);
- memset(data->extraoffset + data->nextra, 0, (nextra - data->nextra) * sizeof(Id));
- data->nextra = nextra;
-}
-
-void
repodata_extend_block(Repodata *data, Id start, Id num)
{
if (!num)
@@ -1212,56 +1813,49 @@
#define REPODATA_ATTRDATA_BLOCK 1023
#define REPODATA_ATTRIDDATA_BLOCK 63
-static inline Id
-get_new_struct(Repodata *data)
+
+Id
+repodata_new_handle(Repodata *data)
{
- /* Make sure to never give out struct id 0. */
- if (!data->structs)
+ if (!data->nxattrs)
{
- data->structs = sat_extend(0, 0, 2, sizeof(Id *), REPODATA_BLOCK);
- data->structs[0] = 0;
- data->structs[1] = 0;
- data->nstructs = 2;
- return 1;
+ data->xattrs = sat_calloc_block(1, sizeof(Id *), REPODATA_BLOCK);
+ data->nxattrs = 2;
}
- data->structs = sat_extend(data->structs, data->nstructs, 1, sizeof(Id *), REPODATA_BLOCK);
- data->structs[data->nstructs] = 0;
- return data->nstructs++;
+ data->xattrs = sat_extend(data->xattrs, data->nxattrs, 1, sizeof(Id *), REPODATA_BLOCK);
+ data->xattrs[data->nxattrs] = 0;
+ return -(data->nxattrs++);
}
-static Id
-repodata_get_handle_int(Repodata *data, Id entry)
+static inline Id **
+repodata_get_attrp(Repodata *data, Id handle)
{
- Id *ap;
- if (!data->attrs && entry >= 0)
+ if (handle == REPOENTRY_META)
{
- data->attrs = sat_calloc_block(data->end - data->start, sizeof(Id),
- REPODATA_BLOCK);
+ if (!data->xattrs)
+ {
+ data->xattrs = sat_calloc_block(1, sizeof(Id *), REPODATA_BLOCK);
+ data->nxattrs = 2;
+ }
}
- else if (!data->extraattrs && entry < 0)
- data->extraattrs = sat_calloc_block(data->nextra, sizeof(Id), REPODATA_BLOCK);
- if (entry < 0)
- ap = &data->extraattrs[-1 - entry];
- else
- ap = &data->attrs[entry];
- if (!*ap)
- *ap = get_new_struct(data);
- return *ap;
-}
-
-Id
-repodata_get_handle(Repodata *data, Id entry)
-{
- return repodata_get_handle_int(data, entry);
+ if (handle < 0)
+ return data->xattrs - handle;
+ if (handle < data->start || handle >= data->end)
+ repodata_extend(data, handle);
+ if (!data->attrs)
+ data->attrs = sat_calloc_block(data->end - data->start, sizeof(Id *), REPODATA_BLOCK);
+ return data->attrs + (handle - data->start);
}
static void
repodata_insert_keyid(Repodata *data, Id handle, Id keyid, Id val, int overwrite)
{
Id *pp;
- Id *ap;
+ Id *ap, **app;
int i;
- ap = data->structs[handle];
+
+ app = repodata_get_attrp(data, handle);
+ ap = *app;
i = 0;
if (ap)
{
@@ -1282,37 +1876,20 @@
i = pp - ap;
}
ap = sat_extend(ap, i, 3, sizeof(Id), REPODATA_ATTRS_BLOCK);
- data->structs[handle] = ap;
+ *app = ap;
pp = ap + i;
*pp++ = keyid;
*pp++ = val;
- *pp = 0;
-}
-
-void
-repodata_set(Repodata *data, Id handle, Repokey *key, Id val)
-{
- Id keyid;
-
- /* find key in keys */
- for (keyid = 1; keyid < data->nkeys; keyid++)
- if (data->keys[keyid].name == key->name && data->keys[keyid].type == key->type)
- {
- if ((key->type == REPOKEY_TYPE_CONSTANT || key->type == REPOKEY_TYPE_CONSTANTID) && key->size != data->keys[keyid].size)
- continue;
- break;
- }
- if (keyid == data->nkeys)
- {
- /* allocate new key */
- data->keys = sat_realloc2(data->keys, data->nkeys + 1, sizeof(Repokey));
- data->keys[data->nkeys++] = *key;
- if (data->verticaloffset)
- {
- data->verticaloffset = sat_realloc2(data->verticaloffset, data->nkeys, sizeof(Id));
- data->verticaloffset[data->nkeys - 1] = 0;
- }
- }
+ *pp = 0;
+}
+
+
+void
+repodata_set(Repodata *data, Id handle, Repokey *key, Id val)
+{
+ Id keyid;
+
+ keyid = repodata_key2id(data, key, 1);
repodata_insert_keyid(data, handle, keyid, val, 1);
}
@@ -1408,7 +1985,7 @@
repodata_add_array(Repodata *data, Id handle, Id keyname, Id keytype, int entrysize)
{
int oldsize;
- Id *ida, *pp;
+ Id *ida, *pp, **ppp;
if (handle == data->lasthandle && data->keys[data->lastkey].name == keyname && data->keys[data->lastkey].type == keytype && data->attriddatalen == data->lastdatalen)
{
@@ -1418,7 +1995,8 @@
data->lastdatalen += entrysize;
return;
}
- pp = data->structs[handle];
+ ppp = repodata_get_attrp(data, handle);
+ pp = *ppp;
if (pp)
for (; *pp; pp += 2)
if (data->keys[*pp].name == keyname && data->keys[*pp].type == keytype)
@@ -1624,37 +2202,40 @@
repodata_add_idarray(data, handle, keyname, id);
}
-Id
-repodata_create_struct(Repodata *data, Id handle, Id keyname)
+void
+repodata_add_fixarray(Repodata *data, Id handle, Id keyname, Id ghandle)
+{
+ repodata_add_array(data, handle, keyname, REPOKEY_TYPE_FIXARRAY, 1);
+ data->attriddata[data->attriddatalen++] = ghandle;
+ data->attriddata[data->attriddatalen++] = 0;
+}
+
+void
+repodata_add_flexarray(Repodata *data, Id handle, Id keyname, Id ghandle)
{
- Id newhandle = get_new_struct(data);
- repodata_add_array(data, handle, keyname, REPOKEY_TYPE_COUNTED, 1);
- data->attriddata[data->attriddatalen++] = newhandle;
+ repodata_add_array(data, handle, keyname, REPOKEY_TYPE_FLEXARRAY, 1);
+ data->attriddata[data->attriddatalen++] = ghandle;
data->attriddata[data->attriddatalen++] = 0;
- return newhandle;
}
void
repodata_merge_attrs(Repodata *data, Id dest, Id src)
{
Id *keyp;
- if (dest == src
- || !(keyp = data->structs[src < 0
- ? data->extraattrs[-1 - src]
- : data->attrs[src]]))
+ if (dest == src || !(keyp = data->attrs[src]))
return;
- dest = repodata_get_handle_int(data, dest);
for (; *keyp; keyp += 2)
repodata_insert_keyid(data, dest, keyp[0], keyp[1], 0);
}
-/*********************************/
+
+
+
+/**********************************************************************/
/* unify with repo_write! */
#define EXTDATA_BLOCK 1023
-#define SCHEMATA_BLOCK 31
-#define SCHEMATADATA_BLOCK 255
struct extdata {
unsigned char *buf;
@@ -1701,63 +2282,9 @@
/*********************************/
static void
-addschema_prepare(Repodata *data, Id *schematacache)
-{
- int h, len, i;
- Id *sp;
-
- memset(schematacache, 0, 256 * sizeof(Id));
- for (i = 0; i < data->nschemata; i++)
- {
- for (sp = data->schemadata + data->schemata[i], h = 0; *sp; len++)
- h = h * 7 + *sp++;
- h &= 255;
- schematacache[h] = i + 1;
- }
- data->schemadata = sat_extend_resize(data->schemadata, data->schemadatalen, sizeof(Id), SCHEMATADATA_BLOCK);
- data->schemata = sat_extend_resize(data->schemata, data->nschemata, sizeof(Id), SCHEMATA_BLOCK);
-}
-
-static Id
-addschema(Repodata *data, Id *schema, Id *schematacache)
-{
- int h, len;
- Id *sp, cid;
-
- for (sp = schema, len = 0, h = 0; *sp; len++)
- h = h * 7 + *sp++;
- h &= 255;
- len++;
-
- cid = schematacache[h];
- if (cid)
- {
- cid--;
- if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
- return cid;
- /* cache conflict */
- for (cid = 0; cid < data->nschemata; cid++)
- if (!memcmp(data->schemadata + data->schemata[cid], schema, len * sizeof(Id)))
- return cid;
- }
- /* a new one. make room. */
- data->schemadata = sat_extend(data->schemadata, data->schemadatalen, len, sizeof(Id), SCHEMATADATA_BLOCK);
- data->schemata = sat_extend(data->schemata, data->nschemata, 1, sizeof(Id), SCHEMATA_BLOCK);
- /* add schema */
- memcpy(data->schemadata + data->schemadatalen, schema, len * sizeof(Id));
- data->schemata[data->nschemata] = data->schemadatalen;
- data->schemadatalen += len;
- schematacache[h] = data->nschemata + 1;
-#if 0
-fprintf(stderr, "addschema: new schema\n");
-#endif
- return data->nschemata++;
-}
-
-static void
repodata_serialize_key(Repodata *data, struct extdata *newincore,
struct extdata *newvincore,
- Id *schema, Id *schematacache,
+ Id *schema,
Repokey *key, Id val)
{
/* Otherwise we have a new value. Parse it into the internal
@@ -1775,97 +2302,128 @@
}
switch (key->type)
{
- case REPOKEY_TYPE_VOID:
- case REPOKEY_TYPE_CONSTANT:
- case REPOKEY_TYPE_CONSTANTID:
- break;
- case REPOKEY_TYPE_STR:
- data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
- break;
- case REPOKEY_TYPE_MD5:
- data_addblob(xd, data->attrdata + val, SIZEOF_MD5);
- break;
- case REPOKEY_TYPE_SHA1:
- data_addblob(xd, data->attrdata + val, SIZEOF_SHA1);
- break;
- case REPOKEY_TYPE_ID:
- case REPOKEY_TYPE_NUM:
- case REPOKEY_TYPE_DIR:
- data_addid(xd, val);
- break;
- case REPOKEY_TYPE_IDARRAY:
+ case REPOKEY_TYPE_VOID:
+ case REPOKEY_TYPE_CONSTANT:
+ case REPOKEY_TYPE_CONSTANTID:
+ break;
+ case REPOKEY_TYPE_STR:
+ data_addblob(xd, data->attrdata + val, strlen((char *)(data->attrdata + val)) + 1);
+ break;
+ case REPOKEY_TYPE_MD5:
+ data_addblob(xd, data->attrdata + val, SIZEOF_MD5);
+ break;
+ case REPOKEY_TYPE_SHA1:
+ data_addblob(xd, data->attrdata + val, SIZEOF_SHA1);
+ break;
+ case REPOKEY_TYPE_SHA256:
+ data_addblob(xd, data->attrdata + val, SIZEOF_SHA256);
+ break;
+ case REPOKEY_TYPE_ID:
+ case REPOKEY_TYPE_NUM:
+ case REPOKEY_TYPE_DIR:
+ data_addid(xd, val);
+ break;
+ case REPOKEY_TYPE_IDARRAY:
+ for (ida = data->attriddata + val; *ida; ida++)
+ data_addideof(xd, ida[0], ida[1] ? 0 : 1);
+ break;
+ case REPOKEY_TYPE_DIRNUMNUMARRAY:
+ for (ida = data->attriddata + val; *ida; ida += 3)
+ {
+ data_addid(xd, ida[0]);
+ data_addid(xd, ida[1]);
+ data_addideof(xd, ida[2], ida[3] ? 0 : 1);
+ }
+ break;
+ case REPOKEY_TYPE_DIRSTRARRAY:
+ for (ida = data->attriddata + val; *ida; ida += 2)
+ {
+ data_addideof(xd, ida[0], ida[2] ? 0 : 1);
+ data_addblob(xd, data->attrdata + ida[1], strlen((char *)(data->attrdata + ida[1])) + 1);
+ }
+ break;
+ case REPOKEY_TYPE_FIXARRAY:
+ {
+ int num = 0;
+ schemaid = 0;
for (ida = data->attriddata + val; *ida; ida++)
- data_addideof(xd, ida[0], ida[1] ? 0 : 1);
- break;
- case REPOKEY_TYPE_DIRNUMNUMARRAY:
- for (ida = data->attriddata + val; *ida; ida += 3)
- {
- data_addid(xd, ida[0]);
- data_addid(xd, ida[1]);
- data_addideof(xd, ida[2], ida[3] ? 0 : 1);
- }
- break;
- case REPOKEY_TYPE_DIRSTRARRAY:
- for (ida = data->attriddata + val; *ida; ida += 2)
{
- data_addideof(xd, ida[0], ida[2] ? 0 : 1);
- data_addblob(xd, data->attrdata + ida[1], strlen((char *)(data->attrdata + ida[1])) + 1);
- }
- break;
- case REPOKEY_TYPE_COUNTED:
- {
- int num = 0;
- schemaid = 0;
- for (ida = data->attriddata + val; *ida; ida++)
- {
#if 0
- fprintf(stderr, "serialize struct %d\n", *ida);
+ fprintf(stderr, "serialize struct %d\n", *ida);
#endif
- sp = schema;
- Id *kp = data->structs[*ida];
- if (!kp)
- continue;
- num++;
- for (;*kp; kp += 2)
- {
+ sp = schema;
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ continue;
+ num++;
+ for (;*kp; kp += 2)
+ {
#if 0
- fprintf(stderr, " %s:%d\n", id2str(data->repo->pool, data->keys[*kp].name), kp[1]);
+ fprintf(stderr, " %s:%d\n", id2str(data->repo->pool, data->keys[*kp].name), kp[1]);
#endif
- *sp++ = *kp;
- }
- *sp = 0;
- if (!schemaid)
- schemaid = addschema(data, schema, schematacache);
- else if (schemaid != addschema(data, schema, schematacache))
- {
- fprintf(stderr, " not yet implemented: substructs with different schemas\n");
- exit(1);
- }
+ *sp++ = *kp;
+ }
+ *sp = 0;
+ if (!schemaid)
+ schemaid = repodata_schema2id(data, schema, 1);
+ else if (schemaid != repodata_schema2id(data, schema, 0))
+ {
+ fprintf(stderr, " not yet implemented: substructs with different schemas\n");
+ exit(1);
+ }
#if 0
- fprintf(stderr, " schema %d\n", schemaid);
+ fprintf(stderr, " schema %d\n", schemaid);
#endif
- }
- if (!num)
- break;
- data_addid(xd, num);
- data_addid(xd, schemaid);
- for (ida = data->attriddata + val; *ida; ida++)
- {
- Id *kp = data->structs[*ida];
- if (!kp)
- continue;
- for (;*kp; kp += 2)
- {
- repodata_serialize_key(data, newincore, newvincore,
- schema, schematacache,
- data->keys + *kp, kp[1]);
- }
- }
+ }
+ if (!num)
break;
- }
- default:
- fprintf(stderr, "don't know how to handle type %d\n", key->type);
- exit(1);
+ data_addid(xd, num);
+ data_addid(xd, schemaid);
+ for (ida = data->attriddata + val; *ida; ida++)
+ {
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ continue;
+ for (;*kp; kp += 2)
+ {
+ repodata_serialize_key(data, newincore, newvincore,
+ schema, data->keys + *kp, kp[1]);
+ }
+ }
+ break;
+ }
+ case REPOKEY_TYPE_FLEXARRAY:
+ {
+ int num = 0;
+ for (ida = data->attriddata + val; *ida; ida++)
+ num++;
+ data_addid(xd, num);
+ for (ida = data->attriddata + val; *ida; ida++)
+ {
+ Id *kp = data->xattrs[-*ida];
+ if (!kp)
+ {
+ data_addid(xd, 0); /* XXX */
+ continue;
+ }
+ sp = schema;
+ for (;*kp; kp += 2)
+ *sp++ = *kp;
+ *sp = 0;
+ schemaid = repodata_schema2id(data, schema, 1);
+ data_addid(xd, schemaid);
+ kp = data->xattrs[-*ida];
+ for (;*kp; kp += 2)
+ {
+ repodata_serialize_key(data, newincore, newvincore,
+ schema, data->keys + *kp, kp[1]);
+ }
+ }
+ break;
+ }
+ default:
+ fprintf(stderr, "don't know how to handle type %d\n", key->type);
+ exit(1);
}
if (key->storage == KEY_STORAGE_VERTICAL_OFFSET)
{
@@ -1879,42 +2437,53 @@
void
repodata_internalize(Repodata *data)
{
- Repokey *key;
+ Repokey *key, solvkey;
Id entry, nentry;
- Id schematacache[256];
- Id schemaid, *schema, *sp, oldschema, *keyp, *seen;
+ Id schemaid, *schema, *sp, oldschema, *keyp, *keypstart, *seen;
unsigned char *dp, *ndp;
int newschema, oldcount;
struct extdata newincore;
struct extdata newvincore;
+ Id solvkeyid;
- if (!data->attrs && !data->extraattrs)
+ if (!data->attrs && !data->xattrs)
return;
newvincore.buf = data->vincore;
newvincore.len = data->vincorelen;
+ /* find the solvables key, create if needed */
+ memset(&solvkey, 0, sizeof(solvkey));
+ solvkey.name = REPOSITORY_SOLVABLES;
+ solvkey.type = REPOKEY_TYPE_FLEXARRAY;
+ solvkey.size = 0;
+ solvkey.storage = KEY_STORAGE_INCORE;
+ solvkeyid = repodata_key2id(data, &solvkey, data->end != data->start ? 1 : 0);
+
schema = sat_malloc2(data->nkeys, sizeof(Id));
seen = sat_malloc2(data->nkeys, sizeof(Id));
/* Merge the data already existing (in data->schemata, ->incoredata and
friends) with the new attributes in data->attrs[]. */
nentry = data->end - data->start;
- addschema_prepare(data, schematacache);
memset(&newincore, 0, sizeof(newincore));
- data_addid(&newincore, 0);
- if (!data->attrs)
- nentry = 0;
- for (entry = data->extraattrs ? -data->nextra : 0; entry < nentry; entry++)
+ data_addid(&newincore, 0); /* start data at offset 1 */
+
+ data->mainschema = 0;
+ data->mainschemaoffsets = sat_free(data->mainschemaoffsets);
+
+ /* join entry data */
+ /* we start with the meta data, entry -1 */
+ for (entry = -1; entry < nentry; entry++)
{
- Id handle;
memset(seen, 0, data->nkeys * sizeof(Id));
- sp = schema;
- dp = entry2data(data, entry);
- if (data->incoredata)
- dp = data_read_id(dp, &oldschema);
- else
- oldschema = 0;
+ oldschema = 0;
+ dp = data->incoredata;
+ if (dp)
+ {
+ dp += entry >= 0 ? data->incoreoffset[entry] : 1;
+ dp = data_read_id(dp, &oldschema);
+ }
#if 0
fprintf(stderr, "oldschema %d\n", oldschema);
fprintf(stderr, "schemata %d\n", data->schemata[oldschema]);
@@ -1923,6 +2492,7 @@
/* seen: -1: old data 0: skipped >0: id + 1 */
newschema = 0;
oldcount = 0;
+ sp = schema;
for (keyp = data->schemadata + data->schemata[oldschema]; *keyp; keyp++)
{
if (seen[*keyp])
@@ -1934,8 +2504,21 @@
*sp++ = *keyp;
oldcount++;
}
- handle = entry < 0 ? data->extraattrs[-1 - entry] : data->attrs[entry];
- keyp = data->structs[handle];
+ if (entry >= 0)
+ keyp = data->attrs ? data->attrs[entry] : 0;
+ else
+ {
+ /* strip solvables key */
+ *sp = 0;
+ for (sp = keyp = schema; *sp; sp++)
+ if (*sp != solvkeyid)
+ *keyp++ = *sp;
+ else
+ oldcount--;
+ sp = keyp;
+ seen[solvkeyid] = 0;
+ keyp = data->xattrs ? data->xattrs[1] : 0;
+ }
if (keyp)
for (; *keyp; keyp += 2)
{
@@ -1946,14 +2529,19 @@
}
seen[*keyp] = keyp[1] + 1;
}
- *sp++ = 0;
+ if (entry < 0 && data->end != data->start)
+ {
+ *sp++ = solvkeyid;
+ newschema = 1;
+ }
+ *sp = 0;
if (newschema)
/* Ideally we'd like to sort the new schema here, to ensure
schema equality independend of the ordering. We can't do that
yet. For once see below (old ids need to come before new ids).
An additional difficulty is that we also need to move
the values with the keys. */
- schemaid = addschema(data, schema, schematacache);
+ schemaid = repodata_schema2id(data, schema, 1);
else
schemaid = oldschema;
@@ -1965,13 +2553,25 @@
(oX being the old keyids (possibly overwritten), and nX being
the new keyids). This rules out sorting the keyids in order
to ensure a small schema count. */
- if (entry < 0)
- data->extraoffset[-1 - entry] = newincore.len;
- else
- data->incoreoffset[entry] = newincore.len;
+ if (entry >= 0)
+ data->incoreoffset[entry] = newincore.len;
data_addid(&newincore, schemaid);
- for (keyp = data->schemadata + data->schemata[schemaid]; *keyp; keyp++)
+ if (entry == -1)
+ {
+ data->mainschema = schemaid;
+ data->mainschemaoffsets = sat_calloc(sp - schema, sizeof(Id));
+ }
+ keypstart = data->schemadata + data->schemata[schemaid];
+ for (keyp = keypstart; *keyp; keyp++)
{
+ if (entry == -1)
+ data->mainschemaoffsets[keyp - keypstart] = newincore.len;
+ if (*keyp == solvkeyid)
+ {
+ /* add flexarray entry count */
+ data_addid(&newincore, data->end - data->start);
+ break;
+ }
key = data->keys + *keyp;
#if 0
fprintf(stderr, "internalize %d:%s:%s\n", entry, id2str(data->repo->pool, key->name), id2str(data->repo->pool, key->type));
@@ -1986,7 +2586,7 @@
ndp = data_skip(ndp, REPOKEY_TYPE_ID);
}
else if (key->storage == KEY_STORAGE_INCORE)
- ndp = data_skip_recursive(data, dp, key);
+ ndp = data_skip_key(data, dp, key);
oldcount--;
}
if (seen[*keyp] == -1)
@@ -2003,23 +2603,26 @@
/* Otherwise we have a new value. Parse it into the internal
form. */
repodata_serialize_key(data, &newincore, &newvincore,
- schema, schematacache,
- key, seen[*keyp] - 1);
+ schema, key, seen[*keyp] - 1);
}
dp = ndp;
}
- if (data->structs[handle])
- data->structs[handle] = sat_free(data->structs[handle]);
+ if (entry >= 0 && data->attrs && data->attrs[entry])
+ data->attrs[entry] = sat_free(data->attrs[entry]);
}
- for (entry = 0; entry < data->nstructs; entry++)
- if (data->structs[entry])
- sat_free(data->structs[entry]);
- data->structs = sat_free(data->structs);
+ /* free all xattrs */
+ for (entry = 0; entry < data->nxattrs; entry++)
+ if (data->xattrs[entry])
+ sat_free(data->xattrs[entry]);
+ data->xattrs = sat_free(data->xattrs);
+ data->nxattrs = 0;
+
data->lasthandle = 0;
data->lastkey = 0;
data->lastdatalen = 0;
sat_free(schema);
sat_free(seen);
+ repodata_free_schemahash(data);
sat_free(data->incoredata);
data->incoredata = newincore.buf;
@@ -2031,220 +2634,20 @@
data->vincorelen = newvincore.len;
data->attrs = sat_free(data->attrs);
- data->extraattrs = sat_free(data->extraattrs);
data->attrdata = sat_free(data->attrdata);
data->attriddata = sat_free(data->attriddata);
data->attrdatalen = 0;
data->attriddatalen = 0;
}
-Id
-repodata_str2dir(Repodata *data, const char *dir, int create)
-{
- Id id, parent;
- const char *dire;
-
- parent = 0;
- while (*dir == '/' && dir[1] == '/')
- dir++;
- if (*dir == '/' && !dir[1])
- return 1;
- while (*dir)
- {
- dire = strchrnul(dir, '/');
- if (data->localpool)
- id = stringpool_strn2id(&data->spool, dir, dire - dir, create);
- else
- id = strn2id(data->repo->pool, dir, dire - dir, create);
- if (!id)
- return 0;
- parent = dirpool_add_dir(&data->dirpool, parent, id, create);
- if (!parent)
- return 0;
- if (!*dire)
- break;
- dir = dire + 1;
- while (*dir == '/')
- dir++;
- }
- return parent;
-}
-
-const char *
-repodata_dir2str(Repodata *data, Id did, const char *suf)
-{
- Pool *pool = data->repo->pool;
- int l = 0;
- Id parent, comp;
- const char *comps;
- char *p;
-
- if (!did)
- return suf ? suf : "";
- parent = did;
- while (parent)
- {
- comp = dirpool_compid(&data->dirpool, parent);
- comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
- l += strlen(comps);
- parent = dirpool_parent(&data->dirpool, parent);
- if (parent)
- l++;
- }
- if (suf)
- l += strlen(suf) + 1;
- p = pool_alloctmpspace(pool, l + 1) + l;
- *p = 0;
- if (suf)
- {
- p -= strlen(suf);
- strcpy(p, suf);
- *--p = '/';
- }
- parent = did;
- while (parent)
- {
- comp = dirpool_compid(&data->dirpool, parent);
- comps = stringpool_id2str(data->localpool ? &data->spool : &pool->ss, comp);
- l = strlen(comps);
- p -= l;
- strncpy(p, comps, l);
- parent = dirpool_parent(&data->dirpool, parent);
- if (parent)
- *--p = '/';
- }
- return p;
-}
-
-unsigned int
-repodata_compress_page(unsigned char *page, unsigned int len, unsigned char *cpage, unsigned int max)
-{
- return compress_buf(page, len, cpage, max);
-}
-
-#define SOLV_ERROR_EOF 3
-
-static inline unsigned int
-read_u32(FILE *fp)
-{
- int c, i;
- unsigned int x = 0;
-
- for (i = 0; i < 4; i++)
- {
- c = getc(fp);
- if (c == EOF)
- return 0;
- x = (x << 8) | c;
- }
- return x;
-}
-
-#define SOLV_ERROR_EOF 3
-#define SOLV_ERROR_CORRUPT 6
-
-/* Try to either setup on-demand paging (using FP as backing
- file), or in case that doesn't work (FP not seekable) slurps in
- all pages and deactivates paging. */
-void
-repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz)
-{
- FILE *fp = data->fp;
- unsigned int npages;
- unsigned int i;
- unsigned int can_seek;
- long cur_file_ofs;
- unsigned char buf[BLOB_PAGESIZE];
-
- if (pagesz != BLOB_PAGESIZE)
- {
- /* We could handle this by slurping in everything. */
- data->error = SOLV_ERROR_CORRUPT;
- return;
- }
- can_seek = 1;
- if ((cur_file_ofs = ftell(fp)) < 0)
- can_seek = 0;
- clearerr(fp);
- if (can_seek)
- data->pagefd = dup(fileno(fp));
- if (data->pagefd == -1)
- can_seek = 0;
-
-#ifdef DEBUG_PAGING
- fprintf (stderr, "can %sseek\n", can_seek ? "" : "NOT ");
-#endif
- npages = (blobsz + BLOB_PAGESIZE - 1) / BLOB_PAGESIZE;
-
- data->num_pages = npages;
- data->pages = sat_malloc2(npages, sizeof(data->pages[0]));
-
- /* If we can't seek on our input we have to slurp in everything. */
- if (!can_seek)
- data->blob_store = sat_malloc(npages * BLOB_PAGESIZE);
- for (i = 0; i < npages; i++)
- {
- unsigned int in_len = read_u32(fp);
- unsigned int compressed = in_len & 1;
- Attrblobpage *p = data->pages + i;
- in_len >>= 1;
-#ifdef DEBUG_PAGING
- fprintf (stderr, "page %d: len %d (%scompressed)\n",
- i, in_len, compressed ? "" : "not ");
-#endif
- if (can_seek)
- {
- cur_file_ofs += 4;
- p->mapped_at = -1;
- p->file_offset = cur_file_ofs;
- p->file_size = in_len * 2 + compressed;
- if (fseek(fp, in_len, SEEK_CUR) < 0)
- {
- perror ("fseek");
- fprintf (stderr, "can't seek after we thought we can\n");
- /* We can't fall back to non-seeking behaviour as we already
- read over some data pages without storing them away. */
- data->error = SOLV_ERROR_EOF;
- close(data->pagefd);
- data->pagefd = -1;
- return;
- }
- cur_file_ofs += in_len;
- }
- else
- {
- unsigned int out_len;
- void *dest = data->blob_store + i * BLOB_PAGESIZE;
- p->mapped_at = i * BLOB_PAGESIZE;
- p->file_offset = 0;
- p->file_size = 0;
- /* We can't seek, so suck everything in. */
- if (fread(compressed ? buf : dest, in_len, 1, fp) != 1)
- {
- perror("fread");
- data->error = SOLV_ERROR_EOF;
- return;
- }
- if (compressed)
- {
- out_len = unchecked_decompress_buf(buf, in_len, dest, BLOB_PAGESIZE);
- if (out_len != BLOB_PAGESIZE && i < npages - 1)
- {
- data->error = SOLV_ERROR_CORRUPT;
- return;
- }
- }
- }
- }
-}
-
void
repodata_disable_paging(Repodata *data)
{
if (maybe_load_repodata(data, 0)
&& data->num_pages)
- load_page_range (data, 0, data->num_pages - 1);
+ repodata_load_page_range(data, 0, data->num_pages - 1);
}
+
/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/
Modified: trunk/sat-solver/src/repodata.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repodata.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repodata.h (original)
+++ trunk/sat-solver/src/repodata.h Thu Oct 9 14:47:05 2008
@@ -40,6 +40,11 @@
long file_size;
} Attrblobpage;
+typedef struct _Repopos {
+ Id schema;
+ Id dp;
+} Repopos;
+
typedef struct _Repodata {
struct _Repo *repo; /* back pointer to repo */
@@ -50,15 +55,9 @@
int state; /* available, stub or error */
void (*loadcallback)(struct _Repodata *);
- char *location; /* E.g. filename or the like */
- char *checksum; /* Checksum of the file */
- unsigned nchecksum; /* Length of the checksum */
- unsigned checksumtype; /* Type of checksum */
int start; /* start of solvables this repodata is valid for */
int end; /* last solvable + 1 of this repodata */
- int extrastart;
- int nextra;
FILE *fp; /* file pointer of solv file */
int error; /* corrupt solv file */
@@ -66,24 +65,27 @@
struct _Repokey *keys; /* keys, first entry is always zero */
unsigned int nkeys; /* length of keys array */
+ unsigned char keybits[32]; /* keyname hash */
Id *schemata; /* schema -> offset into schemadata */
unsigned int nschemata; /* number of schemata */
-
Id *schemadata; /* schema storage */
unsigned int schemadatalen; /* schema storage size */
+ Id *schematahash; /* unification helper */
Stringpool spool; /* local string pool */
int localpool; /* is local string pool used */
Dirpool dirpool; /* local dir pool */
+ Id mainschema;
+ Id *mainschemaoffsets;
+
unsigned char *incoredata; /* in-core data (flat_attrs) */
unsigned int incoredatalen; /* data len (attr_next_free) */
unsigned int incoredatafree; /* free data len */
Id *incoreoffset; /* offset for all entries (ent2attr) */
- Id *extraoffset; /* offset for all extra entries */
Id *verticaloffset; /* offset for all verticals, nkeys elements */
Id lastverticaloffset; /* end of verticals */
@@ -102,36 +104,48 @@
unsigned char *vincore;
unsigned int vincorelen;
- Id *attrs; /* un-internalized attributes */
- Id *extraattrs; /* Same, but for extra objects. */
+ Id **attrs; /* un-internalized attributes */
+ Id **xattrs; /* anonymous handles */
+ int nxattrs;
+
unsigned char *attrdata; /* their string data space */
unsigned int attrdatalen;
Id *attriddata; /* their id space */
unsigned int attriddatalen;
- Id **structs; /* key-value lists */
- unsigned int nstructs;
/* array cache */
Id lasthandle;
Id lastkey;
Id lastdatalen;
- Id *addedfileprovides;
+ Repopos pos;
+
} Repodata;
+#define REPOENTRY_META -1
+#define REPOENTRY_POS -2
+#define REPOENTRY_SUBSCHEMA -3 /* internal! */
/*-----
* management functions
*/
void repodata_init(Repodata *data, struct _Repo *repo, int localpool);
void repodata_extend(Repodata *data, Id p);
-void repodata_extend_extra(Repodata *data, int nextra);
void repodata_extend_block(Repodata *data, Id p, int num);
void repodata_free(Repodata *data);
/* internalize repodata into .solv, required before writing out a .solv file */
void repodata_internalize(Repodata *data);
+Id repodata_key2id(Repodata *data, struct _Repokey *key, int create);
+Id repodata_schema2id(Repodata *data, Id *schema, int create);
+
+static inline int
+repodata_precheck_keyname(Repodata *data, Id keyname)
+{
+ unsigned char x = data->keybits[(keyname >> 3) & (sizeof(data->keybits) - 1)];
+ return x && (x & (1 << (keyname & 7))) ? 1 : 0;
+}
/*----
* access functions
@@ -144,22 +158,20 @@
void repodata_search(Repodata *data, Id entry, Id keyname, int (*callback)(void *cbdata, Solvable *s, Repodata *data, struct _Repokey *key, struct _KeyValue *kv), void *cbdata);
/* lookup functions */
-Id repodata_lookup_id(Repodata *data, Id entry, Id keyid);
-const char *repodata_lookup_str(Repodata *data, Id entry, Id keyid);
-int repodata_lookup_num(Repodata *data, Id entry, Id keyid, unsigned int *value);
-int repodata_lookup_void(Repodata *data, Id entry, Id keyid);
-const unsigned char *repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyid, Id *typep);
+Id repodata_lookup_id(Repodata *data, Id entry, Id keyname);
+const char *repodata_lookup_str(Repodata *data, Id entry, Id keyname);
+int repodata_lookup_num(Repodata *data, Id entry, Id keyname, unsigned int *value);
+int repodata_lookup_void(Repodata *data, Id entry, Id keyname);
+const unsigned char *repodata_lookup_bin_checksum(Repodata *data, Id entry, Id keyname, Id *typep);
/*-----
* data assignment functions
*/
-/* Returns a handle for the attributes of ENTRY. ENTRY >= 0
- corresponds to data associated with a solvable, ENTRY < 0 is
- extra data. The returned handle is used in the various repodata_set_*
- functions to add attributes to it. */
-Id repodata_get_handle(Repodata *data, Id entry);
+/* create an anonymous handle. useful for substructures like
+ * fixarray/flexarray */
+Id repodata_new_handle(Repodata *data);
/* basic types: void, num, string, Id */
@@ -191,11 +203,10 @@
/* Arrays */
void repodata_add_idarray(Repodata *data, Id handle, Id keyname, Id id);
-void repodata_add_poolstr_array(Repodata *data, Id handle, Id keyname,
- const char *str);
-/* Creates a new substructure. Returns a handle for it (usable with the
- other repodata_{set,add}_* functions. */
-Id repodata_create_struct(Repodata *data, Id handle, Id keyname);
+void repodata_add_poolstr_array(Repodata *data, Id handle, Id keyname, const char *str);
+void repodata_add_fixarray(Repodata *data, Id handle, Id keyname, Id ghandle);
+void repodata_add_flexarray(Repodata *data, Id handle, Id keyname, Id ghandle);
+
/*-----
* data management
@@ -216,8 +227,4 @@
const char *repodata_dir2str(Repodata *data, Id did, const char *suf);
const char *repodata_chk2str(Repodata *data, Id type, const unsigned char *buf);
-/* internal */
-unsigned int repodata_compress_page(unsigned char *, unsigned int, unsigned char *, unsigned int);
-void repodata_read_or_setup_pages(Repodata *data, unsigned int pagesz, unsigned int blobsz);
-
#endif /* SATSOLVER_REPODATA_H */
Modified: trunk/sat-solver/src/repopack.h
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repopack.h?rev=11279&r1=11278&r2=11279&view=diff
==============================================================================
--- trunk/sat-solver/src/repopack.h (original)
+++ trunk/sat-solver/src/repopack.h Thu Oct 9 14:47:05 2008
@@ -103,9 +103,11 @@
dp = data_read_id(dp, &kv->id);
dp = data_read_id(dp, &kv->num);
return data_read_ideof(dp, &kv->num2, &kv->eof);
- case REPOKEY_TYPE_COUNTED:
+ case REPOKEY_TYPE_FIXARRAY:
dp = data_read_id(dp, &kv->num);
return data_read_id(dp, &kv->id);
+ case REPOKEY_TYPE_FLEXARRAY:
+ return data_read_id(dp, &kv->num);
default:
return 0;
}
@@ -171,13 +173,6 @@
return dp + 1;
dp++;
}
- case REPOKEY_TYPE_COUNTED:
- while ((*dp & 0x80) != 0)
- dp++;
- dp++;
- while ((*dp & 0x80) != 0)
- dp++;
- return dp + 1;
default:
return 0;
}
@@ -257,19 +252,11 @@
return dp + 1;
dp++;
}
- case REPOKEY_TYPE_COUNTED:
- while ((*dp & 0x80) != 0)
- dp++;
- dp++;
- while ((*dp & 0x80) != 0)
- dp++;
- return dp + 1;
default:
return 0;
}
}
-unsigned char * data_skip_recursive(Repodata *data, unsigned char *dp,
- Repokey *key);
+unsigned char *data_skip_key(Repodata *data, unsigned char *dp, Repokey *key);
#endif /* SATSOLVER_REPOPACK */
Added: trunk/sat-solver/src/repopage.c
URL: http://svn.opensuse.org/viewcvs/zypp/trunk/sat-solver/src/repopage.c?rev=11279&view=auto
==============================================================================
--- trunk/sat-solver/src/repopage.c (added)
+++ trunk/sat-solver/src/repopage.c Thu Oct 9 14:47:05 2008
@@ -0,0 +1,1024 @@
+/*
+ * Copyright (c) 2007-2008, Novell Inc.
+ *
+ * This program is licensed under the BSD license, read LICENSE.BSD
+ * for further information
+ */
+
+/*
+ * repopage.c
+ *
+ * Paging and compression functions for the vertical repository data
+ *
+ */
+
+#define _XOPEN_SOURCE 500
+
+#include