hash_upgrade.c

/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998, 1999, 2000
 *      Sleepycat Software.  All rights reserved.
 */
#include "config.h"

#ifndef lint
static const char revid[] = "$Id: hash__upgrade_8c-source.html,v 1.1 2008/06/08 10:19:39 sebdiaz Exp $";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <errno.h>
#include <limits.h>
#include <string.h>
#endif

#include "db_int.h"
#include "db_page.h"
#include "db_swap.h"
#include "hash.h"
#include "db_upgrade.h"

/*
 * CDB___ham_30_hashmeta --
 *      Upgrade the database from version 4/5 to version 6.
 *
 * PUBLIC: int CDB___ham_30_hashmeta __P((DB *, char *, u_int8_t *));
 */
int
CDB___ham_30_hashmeta(dbp, real_name, obuf)
        DB *dbp;
        char *real_name;
        u_int8_t *obuf;
{
        DB_ENV *dbenv;
        HASHHDR *oldmeta;
        HMETA30 newmeta;
        u_int32_t *o_spares, *n_spares;
        u_int32_t fillf, maxb, nelem;
        int i, non_zero, ret;

        dbenv = dbp->dbenv;
        memset(&newmeta, 0, sizeof(newmeta));

        oldmeta = (HASHHDR *)obuf;

        /*
         * The first 32 bytes are similar.  The only change is the version
         * and that we removed the ovfl_point and have the page type now.
         */

        newmeta.dbmeta.lsn = oldmeta->lsn;
        newmeta.dbmeta.pgno = oldmeta->pgno;
        newmeta.dbmeta.magic = oldmeta->magic;
        newmeta.dbmeta.version = 6;
        newmeta.dbmeta.pagesize = oldmeta->pagesize;
        newmeta.dbmeta.type = P_HASHMETA;

        /* Move flags */
        newmeta.dbmeta.flags = oldmeta->flags;

        /* Copy: max_bucket, high_mask, low_mask, ffactor, nelem, h_charkey */
        newmeta.max_bucket = oldmeta->max_bucket;
        newmeta.high_mask = oldmeta->high_mask;
        newmeta.low_mask = oldmeta->low_mask;
        newmeta.ffactor = oldmeta->ffactor;
        newmeta.nelem = oldmeta->nelem;
        newmeta.h_charkey = oldmeta->h_charkey;

        /*
         * There was a bug in 2.X versions where the nelem could go negative.
         * In general, this is considered "bad."  If it does go negative
         * (that is, very large and positive), we'll die trying to dump and
         * load this database.  So, let's see if we can fix it here.
         */
        nelem = newmeta.nelem;
        fillf = newmeta.ffactor;
        maxb = newmeta.max_bucket;

        if ((fillf != 0 && fillf * maxb < 2 * nelem) ||
            (fillf == 0 && nelem > 0x8000000))
                newmeta.nelem = 0;
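        /*
         * (Illustrative numbers: with ffactor 8 and max_bucket 64, the
         * check above treats any nelem greater than 256 as a wrapped,
         * bogus count and resets it to 0.)
         */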

        /*
         * We now have to convert the spares array.  The old spares array
         * contained the total number of extra pages allocated prior to
         * the bucket that begins the next doubling.  The new spares array
         * contains the page number of the first bucket in the next doubling
         * MINUS the bucket number of that bucket.
         */
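        /*
         * (Example: an old spares array beginning {0, 2, 5, 0, 0, ...}
         * maps to {1, 1, 3, 6, 0, 0, ...}: new[i] is old[i - 1] + 1,
         * except that a zero old entry seen after any non-zero entry
         * maps to zero.)
         */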
        o_spares = oldmeta->spares;
        n_spares = newmeta.spares;
        non_zero = 0;
        n_spares[0] = 1;
        for (i = 1; i < NCACHED; i++) {
                non_zero = non_zero || o_spares[i - 1] != 0;
                if (o_spares[i - 1] == 0 && non_zero)
                        n_spares[i] = 0;
                else
                        n_spares[i] = 1 + o_spares[i - 1];
        }

        /* Replace the unique ID. */
        if ((ret = CDB___os_fileid(dbenv, real_name, 1, newmeta.dbmeta.uid)) != 0)
                return (ret);

        /* Overwrite the original. */
        memcpy(oldmeta, &newmeta, sizeof(newmeta));

        return (0);
}

/*
 * CDB___ham_31_hashmeta --
 *      Upgrade the database from version 6 to version 7.
 *
 * PUBLIC: int CDB___ham_31_hashmeta
 * PUBLIC:      __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
 */
int
CDB___ham_31_hashmeta(dbp, real_name, flags, fhp, h, dirtyp)
        DB *dbp;
        char *real_name;
        u_int32_t flags;
        DB_FH *fhp;
        PAGE *h;
        int *dirtyp;
{
        HMETA31 *newmeta;
        HMETA30 *oldmeta;

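        /*
         * The dbp, real_name and fhp parameters are unused in this step;
         * COMPQUIET only keeps the compiler from warning about them.
         */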
        COMPQUIET(dbp, NULL);
        COMPQUIET(real_name, NULL);
        COMPQUIET(fhp, NULL);

        newmeta = (HMETA31 *)h;
        oldmeta = (HMETA30 *)h;

        /*
         * Copy the fields down the page.
         * The fields may overlap so start at the bottom and use memmove().
         */
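        /*
         * (The version 7 generic header gains record_count, key_count and
         * alloc_lsn, so every hash-specific field moves to a higher offset;
         * copying the highest-offset field, the spares array, first ensures
         * no source bytes are overwritten before they have been read.)
         */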
        memmove(newmeta->spares, oldmeta->spares, sizeof(oldmeta->spares));
        newmeta->h_charkey = oldmeta->h_charkey;
        newmeta->nelem = oldmeta->nelem;
        newmeta->ffactor = oldmeta->ffactor;
        newmeta->low_mask = oldmeta->low_mask;
        newmeta->high_mask = oldmeta->high_mask;
        newmeta->max_bucket = oldmeta->max_bucket;
        memmove(newmeta->dbmeta.uid,
            oldmeta->dbmeta.uid, sizeof(oldmeta->dbmeta.uid));
        newmeta->dbmeta.flags = oldmeta->dbmeta.flags;
        newmeta->dbmeta.record_count = 0;
        newmeta->dbmeta.key_count = 0;
        ZERO_LSN(newmeta->dbmeta.alloc_lsn);

        /* Update the version. */
        newmeta->dbmeta.version = 7;

        /* Upgrade the flags. */
        if (LF_ISSET(DB_DUPSORT))
                F_SET(&newmeta->dbmeta, DB_HASH_DUPSORT);

        *dirtyp = 1;
        return (0);
}

/*
 * CDB___ham_31_hash --
 *      Upgrade the database hash leaf pages.
 *
 * PUBLIC: int CDB___ham_31_hash
 * PUBLIC:      __P((DB *, char *, u_int32_t, DB_FH *, PAGE *, int *));
 */
int
CDB___ham_31_hash(dbp, real_name, flags, fhp, h, dirtyp)
        DB *dbp;
        char *real_name;
        u_int32_t flags;
        DB_FH *fhp;
        PAGE *h;
        int *dirtyp;
{
        HKEYDATA *hk;
        db_pgno_t pgno, tpgno;
        db_indx_t indx;
        int ret;

        COMPQUIET(flags, 0);

        ret = 0;
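        /*
         * Entries on a hash page alternate key/data, so stepping the index
         * by two visits the data element of each pair.  Off-page duplicate
         * references are handed to CDB___db_31_offdup, which may relocate
         * the duplicates and return a different page number to store back.
         */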
        for (indx = 0; indx < NUM_ENT(h); indx += 2) {
                hk = (HKEYDATA *)H_PAIRDATA(h, indx);
                if (HPAGE_PTYPE(hk) == H_OFFDUP) {
                        memcpy(&pgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
                        tpgno = pgno;
                        if ((ret = CDB___db_31_offdup(dbp, real_name, fhp,
                            LF_ISSET(DB_DUPSORT) ? 1 : 0, &tpgno)) != 0)
                                break;
                        if (pgno != tpgno) {
                                *dirtyp = 1;
                                memcpy(HOFFDUP_PGNO(hk),
                                    &tpgno, sizeof(db_pgno_t));
                        }
                }
        }

        return (ret);
}
