path: root/db1-ast/hash
author     markster <markster@f38db490-d61c-443f-a65b-d21fe96a405b>  2003-01-04 18:53:14 +0000
committer  markster <markster@f38db490-d61c-443f-a65b-d21fe96a405b>  2003-01-04 18:53:14 +0000
commit     f596b0379253a694a6ce5eb725e6f0d9dd1fc46b (patch)
tree       c5ad8b7080744ed0037c1cce4a7bb9365b6d9428 /db1-ast/hash
parent     4e12b55c83563e841b0e39a492cbb71d0c53640c (diff)
Version 0.3.0 from FTP
git-svn-id: http://svn.digium.com/svn/asterisk/trunk@578 f38db490-d61c-443f-a65b-d21fe96a405b
Diffstat (limited to 'db1-ast/hash')
-rwxr-xr-x  db1-ast/hash/README          72
-rwxr-xr-x  db1-ast/hash/extern.h        65
-rwxr-xr-x  db1-ast/hash/hash.c         999
-rwxr-xr-x  db1-ast/hash/hash.h         293
-rwxr-xr-x  db1-ast/hash/hash_bigkey.c  668
-rwxr-xr-x  db1-ast/hash/hash_buf.c     355
-rwxr-xr-x  db1-ast/hash/hash_func.c    212
-rwxr-xr-x  db1-ast/hash/hash_log2.c     56
-rwxr-xr-x  db1-ast/hash/hash_page.c    944
-rwxr-xr-x  db1-ast/hash/hsearch.c      107
-rwxr-xr-x  db1-ast/hash/ndbm.c         215
-rwxr-xr-x  db1-ast/hash/page.h          92
-rwxr-xr-x  db1-ast/hash/search.h        51
13 files changed, 4129 insertions, 0 deletions
diff --git a/db1-ast/hash/README b/db1-ast/hash/README
new file mode 100755
index 000000000..f29ccf7e1
--- /dev/null
+++ b/db1-ast/hash/README
@@ -0,0 +1,72 @@
+# @(#)README 8.1 (Berkeley) 6/4/93
+
+This package implements a superset of the hsearch and dbm/ndbm libraries.
+
+Test Programs:
+ All test programs which need key/data pairs expect them entered
+ with key and data on separate lines.
+
+ tcreat3.c
+ Takes
+ bucketsize (bsize),
+ fill factor (ffactor), and
+ initial number of elements (nelem).
+ Creates a hash table named hashtest containing the
+ key/data pairs entered from standard in.
+ thash4.c
+ Takes
+ bucketsize (bsize),
+ fill factor (ffactor),
+ initial number of elements (nelem),
+ bytes of cache (ncached), and
+ file from which to read data (fname).
+ Creates a table from the key/data pairs on standard in and
+ then does a read of each key/data pair in fname.
+ tdel.c
+ Takes
+ bucketsize (bsize),
+ fill factor (ffactor), and
+ file from which to read data (fname).
+ Reads each key/data pair from fname and deletes the
+ key from the hash table hashtest.
+ tseq.c
+ Reads the key/data pairs in the file hashtest and writes them
+ to standard out.
+ tread2.c
+ Takes
+ bytes of cache (ncached).
+ Reads key/data pairs from standard in and looks them up
+ in the file hashtest.
+ tverify.c
+ Reads key/data pairs from standard in, looks them up
+ in the file hashtest, and verifies that the data is
+ correct.
+
+NOTES:
+
+The file search.h is provided for using the hsearch compatible interface
+on BSD systems. On System V derived systems, search.h should appear in
+/usr/include.
+
+The man page ../man/db.3 explains the interface to the hashing system.
+The file hash.ps is a postscript copy of a paper explaining
+the history, implementation, and performance of the hash package.
+
+"bugs" or idiosyncracies
+
+If you have a lot of overflows, it is possible to run out of overflow
+pages. Currently, this will cause a message to be printed on stderr.
+Eventually, this will be indicated by a return error code.
+
+If you are using the ndbm interface and exit without flushing or closing the
+file, you may lose updates since the package buffers all writes. Also,
+the db interface only creates a single database file. To avoid overwriting
+the user's original file, the suffix ".db" is appended to the file name
+passed to dbm_open. Additionally, if your code "knows" about the historic
+.dir and .pag files, it will break.
+
+There is a fundamental difference between this package and the old hsearch.
+Hsearch requires the user to maintain the keys and data in the application's
+allocated memory while hash takes care of all storage management. The down
+side is that the byte strings passed in the ENTRY structure must be null
+terminated (both the keys and the data).
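
The README above points readers to ../man/db.3 for the programming interface. As a minimal usage sketch (this assumes the standard dbopen()/DBT API shipped in this db1-ast tree; the file name "demo.db" and the key/value strings are invented for illustration):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <db.h>		/* DB, DBT, dbopen(), DB_HASH */

    int main(void)
    {
    	DB *db;
    	DBT key, val;

    	/* Open (creating if necessary) a hash-backed database file. */
    	db = dbopen("demo.db", O_CREAT | O_RDWR, 0644, DB_HASH, NULL);
    	if (db == NULL)
    		return 1;

    	/* Keys and data are described by DBT (pointer + length); the
    	 * NUL-termination requirement at the end of the README applies
    	 * to the hsearch-compatible ENTRY interface, not to DBT. */
    	key.data = "greeting";
    	key.size = strlen("greeting");
    	val.data = "hello";
    	val.size = strlen("hello");

    	if (db->put(db, &key, &val, 0) != 0)
    		return 1;
    	if (db->get(db, &key, &val, 0) == 0)
    		printf("%.*s\n", (int)val.size, (char *)val.data);

    	/* Close (and thereby flush) before exit; as the README warns,
    	 * the package buffers all writes. */
    	return db->close(db);
    }
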
diff --git a/db1-ast/hash/extern.h b/db1-ast/hash/extern.h
new file mode 100755
index 000000000..4f1f23d67
--- /dev/null
+++ b/db1-ast/hash/extern.h
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 1991, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)extern.h 8.4 (Berkeley) 6/16/94
+ */
+
+BUFHEAD *__add_ovflpage __P((HTAB *, BUFHEAD *));
+int __addel __P((HTAB *, BUFHEAD *, const DBT *, const DBT *));
+int __big_delete __P((HTAB *, BUFHEAD *));
+int __big_insert __P((HTAB *, BUFHEAD *, const DBT *, const DBT *));
+int __big_keydata __P((HTAB *, BUFHEAD *, DBT *, DBT *, int));
+int __big_return __P((HTAB *, BUFHEAD *, int, DBT *, int));
+int __big_split __P((HTAB *, BUFHEAD *, BUFHEAD *, BUFHEAD *,
+ int, u_int32_t, SPLIT_RETURN *));
+int __buf_free __P((HTAB *, int, int));
+void __buf_init __P((HTAB *, int));
+u_int32_t __call_hash __P((HTAB *, char *, int));
+int __delpair __P((HTAB *, BUFHEAD *, int));
+int __expand_table __P((HTAB *));
+int __find_bigpair __P((HTAB *, BUFHEAD *, int, char *, int));
+u_int16_t __find_last_page __P((HTAB *, BUFHEAD **));
+void __free_ovflpage __P((HTAB *, BUFHEAD *));
+BUFHEAD *__get_buf __P((HTAB *, u_int32_t, BUFHEAD *, int));
+int __get_page __P((HTAB *, char *, u_int32_t, int, int, int));
+int __ibitmap __P((HTAB *, int, int, int));
+u_int32_t __hash_log2 __P((u_int32_t));
+int __put_page __P((HTAB *, char *, u_int32_t, int, int));
+void __reclaim_buf __P((HTAB *, BUFHEAD *));
+int __split_page __P((HTAB *, u_int32_t, u_int32_t));
+
+/* Default hash routine. */
+extern u_int32_t (*__default_hash) __P((const void *, size_t));
+
+#ifdef HASH_STATISTICS
+extern int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
+#endif
diff --git a/db1-ast/hash/hash.c b/db1-ast/hash/hash.c
new file mode 100755
index 000000000..99592eac5
--- /dev/null
+++ b/db1-ast/hash/hash.c
@@ -0,0 +1,999 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash.c 8.9 (Berkeley) 6/16/94";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#ifdef DEBUG
+#include <assert.h>
+#endif
+
+#include <db.h>
+#include "hash.h"
+#include "page.h"
+#include "extern.h"
+
+static int alloc_segs __P((HTAB *, int));
+static int flush_meta __P((HTAB *));
+static int hash_access __P((HTAB *, ACTION, DBT *, DBT *));
+static int hash_close __P((DB *));
+static int hash_delete __P((const DB *, const DBT *, u_int32_t));
+static int hash_fd __P((const DB *));
+static int hash_get __P((const DB *, const DBT *, DBT *, u_int32_t));
+static int hash_put __P((const DB *, DBT *, const DBT *, u_int32_t));
+static void *hash_realloc __P((SEGMENT **, int, int));
+static int hash_seq __P((const DB *, DBT *, DBT *, u_int32_t));
+static int hash_sync __P((const DB *, u_int32_t));
+static int hdestroy __P((HTAB *));
+static HTAB *init_hash __P((HTAB *, const char *, HASHINFO *));
+static int init_htab __P((HTAB *, int));
+#if BYTE_ORDER == LITTLE_ENDIAN
+static void swap_header __P((HTAB *));
+static void swap_header_copy __P((HASHHDR *, HASHHDR *));
+#endif
+
+/* Fast arithmetic, relying on powers of 2. */
+#define MOD(x, y) ((x) & ((y) - 1))
+
+#define RETURN_ERROR(ERR, LOC) { save_errno = ERR; goto LOC; }
+
+/* Return values */
+#define SUCCESS (0)
+#define ERROR (-1)
+#define ABNORMAL (1)
+
+#ifdef HASH_STATISTICS
+int hash_accesses, hash_collisions, hash_expansions, hash_overflows;
+#endif
+
+/************************** INTERFACE ROUTINES ***************************/
+/* OPEN/CLOSE */
+
+extern DB *
+__hash_open(file, flags, mode, info, dflags)
+ const char *file;
+ int flags, mode, dflags;
+ const HASHINFO *info; /* Special directives for create */
+{
+ HTAB *hashp;
+ struct stat statbuf;
+ DB *dbp;
+ int bpages, hdrsize, new_table, nsegs, save_errno;
+
+ if ((flags & O_ACCMODE) == O_WRONLY) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
+ return (NULL);
+ hashp->fp = -1;
+
+ /*
+ * Even if user wants write only, we need to be able to read
+ * the actual file, so we need to open it read/write. But, the
+ * field in the hashp structure needs to be accurate so that
+ * we can check accesses.
+ */
+ hashp->flags = flags;
+
+ new_table = 0;
+ if (!file || (flags & O_TRUNC) ||
+ (stat(file, &statbuf) && (errno == ENOENT))) {
+ if (errno == ENOENT)
+ errno = 0; /* Just in case someone looks at errno */
+ new_table = 1;
+ }
+ if (file) {
+ if ((hashp->fp = open(file, flags, mode)) == -1)
+ RETURN_ERROR(errno, error0);
+ (void)fcntl(hashp->fp, F_SETFD, 1);
+ }
+ if (new_table) {
+ if (!(hashp = init_hash(hashp, file, (HASHINFO *)info)))
+ RETURN_ERROR(errno, error1);
+ } else {
+ /* Table already exists */
+ if (info && info->hash)
+ hashp->hash = info->hash;
+ else
+ hashp->hash = __default_hash;
+
+ hdrsize = read(hashp->fp, &hashp->hdr, sizeof(HASHHDR));
+#if BYTE_ORDER == LITTLE_ENDIAN
+ swap_header(hashp);
+#endif
+ if (hdrsize == -1)
+ RETURN_ERROR(errno, error1);
+ if (hdrsize != sizeof(HASHHDR))
+ RETURN_ERROR(EFTYPE, error1);
+ /* Verify file type, versions and hash function */
+ if (hashp->MAGIC != HASHMAGIC)
+ RETURN_ERROR(EFTYPE, error1);
+#define OLDHASHVERSION 1
+ if (hashp->VERSION != HASHVERSION &&
+ hashp->VERSION != OLDHASHVERSION)
+ RETURN_ERROR(EFTYPE, error1);
+ if (hashp->hash(CHARKEY, sizeof(CHARKEY))
+ != (u_int32_t) hashp->H_CHARKEY)
+ RETURN_ERROR(EFTYPE, error1);
+ /*
+ * Figure out how many segments we need. Max_Bucket is the
+ * maximum bucket number, so the number of buckets is
+ * max_bucket + 1.
+ */
+ nsegs = (hashp->MAX_BUCKET + 1 + hashp->SGSIZE - 1) /
+ hashp->SGSIZE;
+ hashp->nsegs = 0;
+ if (alloc_segs(hashp, nsegs))
+ /*
+ * If alloc_segs fails, table will have been destroyed
+ * and errno will have been set.
+ */
+ return (NULL);
+ /* Read in bitmaps */
+ bpages = (hashp->SPARES[hashp->OVFL_POINT] +
+ (hashp->BSIZE << BYTE_SHIFT) - 1) >>
+ (hashp->BSHIFT + BYTE_SHIFT);
+
+ hashp->nmaps = bpages;
+ (void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
+ }
+
+ /* Initialize Buffer Manager */
+ if (info && info->cachesize)
+ __buf_init(hashp, info->cachesize);
+ else
+ __buf_init(hashp, DEF_BUFSIZE);
+
+ hashp->new_file = new_table;
+ hashp->save_file = file && (hashp->flags & O_ACCMODE) != O_RDONLY;
+ hashp->cbucket = -1;
+ if (!(dbp = (DB *)malloc(sizeof(DB)))) {
+ save_errno = errno;
+ hdestroy(hashp);
+ errno = save_errno;
+ return (NULL);
+ }
+ dbp->internal = hashp;
+ dbp->close = hash_close;
+ dbp->del = hash_delete;
+ dbp->fd = hash_fd;
+ dbp->get = hash_get;
+ dbp->put = hash_put;
+ dbp->seq = hash_seq;
+ dbp->sync = hash_sync;
+ dbp->type = DB_HASH;
+
+#ifdef DEBUG
+ (void)fprintf(stderr,
+"%s\n%s%x\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
+ "init_htab:",
+ "TABLE POINTER ", hashp,
+ "BUCKET SIZE ", hashp->BSIZE,
+ "BUCKET SHIFT ", hashp->BSHIFT,
+ "DIRECTORY SIZE ", hashp->DSIZE,
+ "SEGMENT SIZE ", hashp->SGSIZE,
+ "SEGMENT SHIFT ", hashp->SSHIFT,
+ "FILL FACTOR ", hashp->FFACTOR,
+ "MAX BUCKET ", hashp->MAX_BUCKET,
+ "OVFL POINT ", hashp->OVFL_POINT,
+ "LAST FREED ", hashp->LAST_FREED,
+ "HIGH MASK ", hashp->HIGH_MASK,
+ "LOW MASK ", hashp->LOW_MASK,
+ "NSEGS ", hashp->nsegs,
+ "NKEYS ", hashp->NKEYS);
+#endif
+#ifdef HASH_STATISTICS
+ hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
+#endif
+ return (dbp);
+
+error1:
+ if (hashp != NULL)
+ (void)close(hashp->fp);
+
+error0:
+ free(hashp);
+ errno = save_errno;
+ return (NULL);
+}
+
+static int
+hash_close(dbp)
+ DB *dbp;
+{
+ HTAB *hashp;
+ int retval;
+
+ if (!dbp)
+ return (ERROR);
+
+ hashp = (HTAB *)dbp->internal;
+ retval = hdestroy(hashp);
+ free(dbp);
+ return (retval);
+}
+
+static int
+hash_fd(dbp)
+ const DB *dbp;
+{
+ HTAB *hashp;
+
+ if (!dbp)
+ return (ERROR);
+
+ hashp = (HTAB *)dbp->internal;
+ if (hashp->fp == -1) {
+ errno = ENOENT;
+ return (-1);
+ }
+ return (hashp->fp);
+}
+
+/************************** LOCAL CREATION ROUTINES **********************/
+static HTAB *
+init_hash(hashp, file, info)
+ HTAB *hashp;
+ const char *file;
+ HASHINFO *info;
+{
+#ifdef _STATBUF_ST_BLKSIZE
+ struct stat statbuf;
+#endif
+ int nelem;
+
+ nelem = 1;
+ hashp->NKEYS = 0;
+ hashp->LORDER = BYTE_ORDER;
+ hashp->BSIZE = DEF_BUCKET_SIZE;
+ hashp->BSHIFT = DEF_BUCKET_SHIFT;
+ hashp->SGSIZE = DEF_SEGSIZE;
+ hashp->SSHIFT = DEF_SEGSIZE_SHIFT;
+ hashp->DSIZE = DEF_DIRSIZE;
+ hashp->FFACTOR = DEF_FFACTOR;
+ hashp->hash = __default_hash;
+ memset(hashp->SPARES, 0, sizeof(hashp->SPARES));
+ memset(hashp->BITMAPS, 0, sizeof (hashp->BITMAPS));
+
+ /* Fix bucket size to be optimal for file system */
+#ifdef _STATBUF_ST_BLKSIZE
+ if (file != NULL) {
+ if (stat(file, &statbuf))
+ return (NULL);
+ hashp->BSIZE = statbuf.st_blksize;
+ hashp->BSHIFT = __hash_log2(hashp->BSIZE);
+ }
+#endif
+
+ if (info) {
+ if (info->bsize) {
+ /* Round pagesize up to power of 2 */
+ hashp->BSHIFT = __hash_log2(info->bsize);
+ hashp->BSIZE = 1 << hashp->BSHIFT;
+ if (hashp->BSIZE > MAX_BSIZE) {
+ errno = EINVAL;
+ return (NULL);
+ }
+ }
+ if (info->ffactor)
+ hashp->FFACTOR = info->ffactor;
+ if (info->hash)
+ hashp->hash = info->hash;
+ if (info->nelem)
+ nelem = info->nelem;
+ if (info->lorder) {
+ if (info->lorder != BIG_ENDIAN &&
+ info->lorder != LITTLE_ENDIAN) {
+ errno = EINVAL;
+ return (NULL);
+ }
+ hashp->LORDER = info->lorder;
+ }
+ }
+ /* init_htab should destroy the table and set errno if it fails */
+ if (init_htab(hashp, nelem))
+ return (NULL);
+ else
+ return (hashp);
+}
+/*
+ * This calls alloc_segs which may run out of memory. Alloc_segs will destroy
+ * the table and set errno, so we just pass the error information along.
+ *
+ * Returns 0 on No Error
+ */
+static int
+init_htab(hashp, nelem)
+ HTAB *hashp;
+ int nelem;
+{
+ register int nbuckets, nsegs;
+ int l2;
+
+ /*
+ * Divide number of elements by the fill factor and determine a
+ * desired number of buckets. Allocate space for the next greater
+ * power of two number of buckets.
+ */
+ nelem = (nelem - 1) / hashp->FFACTOR + 1;
+
+ l2 = __hash_log2(MAX(nelem, 2));
+ nbuckets = 1 << l2;
+
+ hashp->SPARES[l2] = l2 + 1;
+ hashp->SPARES[l2 + 1] = l2 + 1;
+ hashp->OVFL_POINT = l2;
+ hashp->LAST_FREED = 2;
+
+ /* First bitmap page is at: splitpoint l2 page offset 1 */
+ if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
+ return (-1);
+
+ hashp->MAX_BUCKET = hashp->LOW_MASK = nbuckets - 1;
+ hashp->HIGH_MASK = (nbuckets << 1) - 1;
+ hashp->HDRPAGES = ((MAX(sizeof(HASHHDR), MINHDRSIZE) - 1) >>
+ hashp->BSHIFT) + 1;
+
+ nsegs = (nbuckets - 1) / hashp->SGSIZE + 1;
+ nsegs = 1 << __hash_log2(nsegs);
+
+ if (nsegs > hashp->DSIZE)
+ hashp->DSIZE = nsegs;
+ return (alloc_segs(hashp, nsegs));
+}
+
+/********************** DESTROY/CLOSE ROUTINES ************************/
+
+/*
+ * Flushes any changes to the file if necessary and destroys the hashp
+ * structure, freeing all allocated space.
+ */
+static int
+hdestroy(hashp)
+ HTAB *hashp;
+{
+ int i, save_errno;
+
+ save_errno = 0;
+
+#ifdef HASH_STATISTICS
+ (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
+ hash_accesses, hash_collisions);
+ (void)fprintf(stderr, "hdestroy: expansions %ld\n",
+ hash_expansions);
+ (void)fprintf(stderr, "hdestroy: overflows %ld\n",
+ hash_overflows);
+ (void)fprintf(stderr, "keys %ld maxp %d segmentcount %d\n",
+ hashp->NKEYS, hashp->MAX_BUCKET, hashp->nsegs);
+
+ for (i = 0; i < NCACHED; i++)
+ (void)fprintf(stderr,
+ "spares[%d] = %d\n", i, hashp->SPARES[i]);
+#endif
+ /*
+ * Call on buffer manager to free buffers, and if required,
+ * write them to disk.
+ */
+ if (__buf_free(hashp, 1, hashp->save_file))
+ save_errno = errno;
+ if (hashp->dir) {
+ free(*hashp->dir); /* Free initial segments */
+ /* Free extra segments */
+ while (hashp->exsegs--)
+ free(hashp->dir[--hashp->nsegs]);
+ free(hashp->dir);
+ }
+ if (flush_meta(hashp) && !save_errno)
+ save_errno = errno;
+ /* Free Bigmaps */
+ for (i = 0; i < hashp->nmaps; i++)
+ if (hashp->mapp[i])
+ free(hashp->mapp[i]);
+
+ if (hashp->fp != -1)
+ (void)close(hashp->fp);
+
+ free(hashp);
+
+ if (save_errno) {
+ errno = save_errno;
+ return (ERROR);
+ }
+ return (SUCCESS);
+}
+/*
+ * Write modified pages to disk
+ *
+ * Returns:
+ * 0 == OK
+ * -1 ERROR
+ */
+static int
+hash_sync(dbp, flags)
+ const DB *dbp;
+ u_int32_t flags;
+{
+ HTAB *hashp;
+
+ if (flags != 0) {
+ errno = EINVAL;
+ return (ERROR);
+ }
+
+ if (!dbp)
+ return (ERROR);
+
+ hashp = (HTAB *)dbp->internal;
+ if (!hashp->save_file)
+ return (0);
+ if (__buf_free(hashp, 0, 1) || flush_meta(hashp))
+ return (ERROR);
+ hashp->new_file = 0;
+ return (0);
+}
+
+/*
+ * Returns:
+ * 0 == OK
+ * -1 indicates that errno should be set
+ */
+static int
+flush_meta(hashp)
+ HTAB *hashp;
+{
+ HASHHDR *whdrp;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ HASHHDR whdr;
+#endif
+ int fp, i, wsize;
+
+ if (!hashp->save_file)
+ return (0);
+ hashp->MAGIC = HASHMAGIC;
+ hashp->VERSION = HASHVERSION;
+ hashp->H_CHARKEY = hashp->hash(CHARKEY, sizeof(CHARKEY));
+
+ fp = hashp->fp;
+ whdrp = &hashp->hdr;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ whdrp = &whdr;
+ swap_header_copy(&hashp->hdr, whdrp);
+#endif
+ if ((lseek(fp, (off_t)0, SEEK_SET) == -1) ||
+ ((wsize = write(fp, whdrp, sizeof(HASHHDR))) == -1))
+ return (-1);
+ else
+ if (wsize != sizeof(HASHHDR)) {
+ errno = EFTYPE;
+ hashp->errnum = errno;
+ return (-1);
+ }
+ for (i = 0; i < NCACHED; i++)
+ if (hashp->mapp[i])
+ if (__put_page(hashp, (char *)hashp->mapp[i],
+ hashp->BITMAPS[i], 0, 1))
+ return (-1);
+ return (0);
+}
+
+/*******************************SEARCH ROUTINES *****************************/
+/*
+ * All the access routines return
+ *
+ * Returns:
+ * 0 on SUCCESS
+ * 1 to indicate an external ERROR (i.e. key not found, etc)
+ * -1 to indicate an internal ERROR (i.e. out of memory, etc)
+ */
+static int
+hash_get(dbp, key, data, flag)
+ const DB *dbp;
+ const DBT *key;
+ DBT *data;
+ u_int32_t flag;
+{
+ HTAB *hashp;
+
+ hashp = (HTAB *)dbp->internal;
+ if (flag) {
+ hashp->errnum = errno = EINVAL;
+ return (ERROR);
+ }
+ return (hash_access(hashp, HASH_GET, (DBT *)key, data));
+}
+
+static int
+hash_put(dbp, key, data, flag)
+ const DB *dbp;
+ DBT *key;
+ const DBT *data;
+ u_int32_t flag;
+{
+ HTAB *hashp;
+
+ hashp = (HTAB *)dbp->internal;
+ if (flag && flag != R_NOOVERWRITE) {
+ hashp->errnum = errno = EINVAL;
+ return (ERROR);
+ }
+ if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
+ hashp->errnum = errno = EPERM;
+ return (ERROR);
+ }
+ return (hash_access(hashp, flag == R_NOOVERWRITE ?
+ HASH_PUTNEW : HASH_PUT, (DBT *)key, (DBT *)data));
+}
+
+static int
+hash_delete(dbp, key, flag)
+ const DB *dbp;
+ const DBT *key;
+ u_int32_t flag; /* Ignored */
+{
+ HTAB *hashp;
+
+ hashp = (HTAB *)dbp->internal;
+ if (flag && flag != R_CURSOR) {
+ hashp->errnum = errno = EINVAL;
+ return (ERROR);
+ }
+ if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
+ hashp->errnum = errno = EPERM;
+ return (ERROR);
+ }
+ return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
+}
+
+/*
+ * Assume that hashp has been set in wrapper routine.
+ */
+static int
+hash_access(hashp, action, key, val)
+ HTAB *hashp;
+ ACTION action;
+ DBT *key, *val;
+{
+ register BUFHEAD *rbufp;
+ BUFHEAD *bufp, *save_bufp;
+ register u_int16_t *bp;
+ register int n, ndx, off, size;
+ register char *kp;
+ u_int16_t pageno;
+
+#ifdef HASH_STATISTICS
+ hash_accesses++;
+#endif
+
+ off = hashp->BSIZE;
+ size = key->size;
+ kp = (char *)key->data;
+ rbufp = __get_buf(hashp, __call_hash(hashp, kp, size), NULL, 0);
+ if (!rbufp)
+ return (ERROR);
+ save_bufp = rbufp;
+
+ /* Pin the bucket chain */
+ rbufp->flags |= BUF_PIN;
+ for (bp = (u_int16_t *)rbufp->page, n = *bp++, ndx = 1; ndx < n;)
+ if (bp[1] >= REAL_KEY) {
+ /* Real key/data pair */
+ if (size == off - *bp &&
+ memcmp(kp, rbufp->page + *bp, size) == 0)
+ goto found;
+ off = bp[1];
+#ifdef HASH_STATISTICS
+ hash_collisions++;
+#endif
+ bp += 2;
+ ndx += 2;
+ } else if (bp[1] == OVFLPAGE) {
+ rbufp = __get_buf(hashp, *bp, rbufp, 0);
+ if (!rbufp) {
+ save_bufp->flags &= ~BUF_PIN;
+ return (ERROR);
+ }
+ /* FOR LOOP INIT */
+ bp = (u_int16_t *)rbufp->page;
+ n = *bp++;
+ ndx = 1;
+ off = hashp->BSIZE;
+ } else if (bp[1] < REAL_KEY) {
+ if ((ndx =
+ __find_bigpair(hashp, rbufp, ndx, kp, size)) > 0)
+ goto found;
+ if (ndx == -2) {
+ bufp = rbufp;
+ if (!(pageno =
+ __find_last_page(hashp, &bufp))) {
+ ndx = 0;
+ rbufp = bufp;
+ break; /* FOR */
+ }
+ rbufp = __get_buf(hashp, pageno, bufp, 0);
+ if (!rbufp) {
+ save_bufp->flags &= ~BUF_PIN;
+ return (ERROR);
+ }
+ /* FOR LOOP INIT */
+ bp = (u_int16_t *)rbufp->page;
+ n = *bp++;
+ ndx = 1;
+ off = hashp->BSIZE;
+ } else {
+ save_bufp->flags &= ~BUF_PIN;
+ return (ERROR);
+ }
+ }
+
+ /* Not found */
+ switch (action) {
+ case HASH_PUT:
+ case HASH_PUTNEW:
+ if (__addel(hashp, rbufp, key, val)) {
+ save_bufp->flags &= ~BUF_PIN;
+ return (ERROR);
+ } else {
+ save_bufp->flags &= ~BUF_PIN;
+ return (SUCCESS);
+ }
+ case HASH_GET:
+ case HASH_DELETE:
+ default:
+ save_bufp->flags &= ~BUF_PIN;
+ return (ABNORMAL);
+ }
+
+found:
+ switch (action) {
+ case HASH_PUTNEW:
+ save_bufp->flags &= ~BUF_PIN;
+ return (ABNORMAL);
+ case HASH_GET:
+ bp = (u_int16_t *)rbufp->page;
+ if (bp[ndx + 1] < REAL_KEY) {
+ if (__big_return(hashp, rbufp, ndx, val, 0))
+ return (ERROR);
+ } else {
+ val->data = (u_char *)rbufp->page + (int)bp[ndx + 1];
+ val->size = bp[ndx] - bp[ndx + 1];
+ }
+ break;
+ case HASH_PUT:
+ if ((__delpair(hashp, rbufp, ndx)) ||
+ (__addel(hashp, rbufp, key, val))) {
+ save_bufp->flags &= ~BUF_PIN;
+ return (ERROR);
+ }
+ break;
+ case HASH_DELETE:
+ if (__delpair(hashp, rbufp, ndx))
+ return (ERROR);
+ break;
+ default:
+ abort();
+ }
+ save_bufp->flags &= ~BUF_PIN;
+ return (SUCCESS);
+}
+
+static int
+hash_seq(dbp, key, data, flag)
+ const DB *dbp;
+ DBT *key, *data;
+ u_int32_t flag;
+{
+ register u_int32_t bucket;
+ register BUFHEAD *bufp;
+ HTAB *hashp;
+ u_int16_t *bp, ndx;
+
+ hashp = (HTAB *)dbp->internal;
+ if (flag && flag != R_FIRST && flag != R_NEXT) {
+ hashp->errnum = errno = EINVAL;
+ return (ERROR);
+ }
+#ifdef HASH_STATISTICS
+ hash_accesses++;
+#endif
+ if ((hashp->cbucket < 0) || (flag == R_FIRST)) {
+ hashp->cbucket = 0;
+ hashp->cndx = 1;
+ hashp->cpage = NULL;
+ }
+
+ for (bp = NULL; !bp || !bp[0]; ) {
+ if (!(bufp = hashp->cpage)) {
+ for (bucket = hashp->cbucket;
+ bucket <= (u_int32_t) hashp->MAX_BUCKET;
+ bucket++, hashp->cndx = 1) {
+ bufp = __get_buf(hashp, bucket, NULL, 0);
+ if (!bufp)
+ return (ERROR);
+ hashp->cpage = bufp;
+ bp = (u_int16_t *)bufp->page;
+ if (bp[0])
+ break;
+ }
+ hashp->cbucket = bucket;
+ if (hashp->cbucket > hashp->MAX_BUCKET) {
+ hashp->cbucket = -1;
+ return (ABNORMAL);
+ }
+ } else
+ bp = (u_int16_t *)hashp->cpage->page;
+
+#ifdef DEBUG
+ assert(bp);
+ assert(bufp);
+#endif
+ while (bp[hashp->cndx + 1] == OVFLPAGE) {
+ bufp = hashp->cpage =
+ __get_buf(hashp, bp[hashp->cndx], bufp, 0);
+ if (!bufp)
+ return (ERROR);
+ bp = (u_int16_t *)(bufp->page);
+ hashp->cndx = 1;
+ }
+ if (!bp[0]) {
+ hashp->cpage = NULL;
+ ++hashp->cbucket;
+ }
+ }
+ ndx = hashp->cndx;
+ if (bp[ndx + 1] < REAL_KEY) {
+ if (__big_keydata(hashp, bufp, key, data, 1))
+ return (ERROR);
+ } else {
+ key->data = (u_char *)hashp->cpage->page + bp[ndx];
+ key->size = (ndx > 1 ? bp[ndx - 1] : hashp->BSIZE) - bp[ndx];
+ data->data = (u_char *)hashp->cpage->page + bp[ndx + 1];
+ data->size = bp[ndx] - bp[ndx + 1];
+ ndx += 2;
+ if (ndx > bp[0]) {
+ hashp->cpage = NULL;
+ hashp->cbucket++;
+ hashp->cndx = 1;
+ } else
+ hashp->cndx = ndx;
+ }
+ return (SUCCESS);
+}
+
+/********************************* UTILITIES ************************/
+
+/*
+ * Returns:
+ * 0 ==> OK
+ * -1 ==> Error
+ */
+extern int
+__expand_table(hashp)
+ HTAB *hashp;
+{
+ u_int32_t old_bucket, new_bucket;
+ int dirsize, new_segnum, spare_ndx;
+
+#ifdef HASH_STATISTICS
+ hash_expansions++;
+#endif
+ new_bucket = ++hashp->MAX_BUCKET;
+ old_bucket = (hashp->MAX_BUCKET & hashp->LOW_MASK);
+
+ new_segnum = new_bucket >> hashp->SSHIFT;
+
+ /* Check if we need a new segment */
+ if (new_segnum >= hashp->nsegs) {
+ /* Check if we need to expand directory */
+ if (new_segnum >= hashp->DSIZE) {
+ /* Reallocate directory */
+ dirsize = hashp->DSIZE * sizeof(SEGMENT *);
+ if (!hash_realloc(&hashp->dir, dirsize, dirsize << 1))
+ return (-1);
+ hashp->DSIZE = dirsize << 1;
+ }
+ if ((hashp->dir[new_segnum] =
+ (SEGMENT)calloc(hashp->SGSIZE, sizeof(SEGMENT))) == NULL)
+ return (-1);
+ hashp->exsegs++;
+ hashp->nsegs++;
+ }
+ /*
+ * If the split point is increasing (MAX_BUCKET's log base 2
+ * increases), we need to copy the current contents of the spare
+ * split bucket to the next bucket.
+ */
+ spare_ndx = __hash_log2(hashp->MAX_BUCKET + 1);
+ if (spare_ndx > hashp->OVFL_POINT) {
+ hashp->SPARES[spare_ndx] = hashp->SPARES[hashp->OVFL_POINT];
+ hashp->OVFL_POINT = spare_ndx;
+ }
+
+ if (new_bucket > (u_int32_t) hashp->HIGH_MASK) {
+ /* Starting a new doubling */
+ hashp->LOW_MASK = hashp->HIGH_MASK;
+ hashp->HIGH_MASK = new_bucket | hashp->LOW_MASK;
+ }
+ /* Relocate records to the new bucket */
+ return (__split_page(hashp, old_bucket, new_bucket));
+}
+
+/*
+ * If realloc guarantees that the pointer is not destroyed if the realloc
+ * fails, then this routine can go away.
+ */
+static void *
+hash_realloc(p_ptr, oldsize, newsize)
+ SEGMENT **p_ptr;
+ int oldsize, newsize;
+{
+ register void *p;
+
+ if ((p = malloc(newsize))) {
+ memmove(p, *p_ptr, oldsize);
+ memset((char *)p + oldsize, 0, newsize - oldsize);
+ free(*p_ptr);
+ *p_ptr = p;
+ }
+ return (p);
+}
+
+extern u_int32_t
+__call_hash(hashp, k, len)
+ HTAB *hashp;
+ char *k;
+ int len;
+{
+ int n, bucket;
+
+ n = hashp->hash(k, len);
+ bucket = n & hashp->HIGH_MASK;
+ if (bucket > hashp->MAX_BUCKET)
+ bucket = bucket & hashp->LOW_MASK;
+ return (bucket);
+}
+
+/*
+ * Allocate segment table. On error, destroy the table and set errno.
+ *
+ * Returns 0 on success
+ */
+static int
+alloc_segs(hashp, nsegs)
+ HTAB *hashp;
+ int nsegs;
+{
+ register int i;
+ register SEGMENT store;
+
+ int save_errno;
+
+ if ((hashp->dir =
+ (SEGMENT *)calloc(hashp->DSIZE, sizeof(SEGMENT *))) == NULL) {
+ save_errno = errno;
+ (void)hdestroy(hashp);
+ errno = save_errno;
+ return (-1);
+ }
+ /* Allocate segments */
+ if ((store =
+ (SEGMENT)calloc(nsegs << hashp->SSHIFT, sizeof(SEGMENT))) == NULL) {
+ save_errno = errno;
+ (void)hdestroy(hashp);
+ errno = save_errno;
+ return (-1);
+ }
+ for (i = 0; i < nsegs; i++, hashp->nsegs++)
+ hashp->dir[i] = &store[i << hashp->SSHIFT];
+ return (0);
+}
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+/*
+ * Hashp->hdr needs to be byteswapped.
+ */
+static void
+swap_header_copy(srcp, destp)
+ HASHHDR *srcp, *destp;
+{
+ int i;
+
+ P_32_COPY(srcp->magic, destp->magic);
+ P_32_COPY(srcp->version, destp->version);
+ P_32_COPY(srcp->lorder, destp->lorder);
+ P_32_COPY(srcp->bsize, destp->bsize);
+ P_32_COPY(srcp->bshift, destp->bshift);
+ P_32_COPY(srcp->dsize, destp->dsize);
+ P_32_COPY(srcp->ssize, destp->ssize);
+ P_32_COPY(srcp->sshift, destp->sshift);
+ P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
+ P_32_COPY(srcp->last_freed, destp->last_freed);
+ P_32_COPY(srcp->max_bucket, destp->max_bucket);
+ P_32_COPY(srcp->high_mask, destp->high_mask);
+ P_32_COPY(srcp->low_mask, destp->low_mask);
+ P_32_COPY(srcp->ffactor, destp->ffactor);
+ P_32_COPY(srcp->nkeys, destp->nkeys);
+ P_32_COPY(srcp->hdrpages, destp->hdrpages);
+ P_32_COPY(srcp->h_charkey, destp->h_charkey);
+ for (i = 0; i < NCACHED; i++) {
+ P_32_COPY(srcp->spares[i], destp->spares[i]);
+ P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
+ }
+}
+
+static void
+swap_header(hashp)
+ HTAB *hashp;
+{
+ HASHHDR *hdrp;
+ int i;
+
+ hdrp = &hashp->hdr;
+
+ M_32_SWAP(hdrp->magic);
+ M_32_SWAP(hdrp->version);
+ M_32_SWAP(hdrp->lorder);
+ M_32_SWAP(hdrp->bsize);
+ M_32_SWAP(hdrp->bshift);
+ M_32_SWAP(hdrp->dsize);
+ M_32_SWAP(hdrp->ssize);
+ M_32_SWAP(hdrp->sshift);
+ M_32_SWAP(hdrp->ovfl_point);
+ M_32_SWAP(hdrp->last_freed);
+ M_32_SWAP(hdrp->max_bucket);
+ M_32_SWAP(hdrp->high_mask);
+ M_32_SWAP(hdrp->low_mask);
+ M_32_SWAP(hdrp->ffactor);
+ M_32_SWAP(hdrp->nkeys);
+ M_32_SWAP(hdrp->hdrpages);
+ M_32_SWAP(hdrp->h_charkey);
+ for (i = 0; i < NCACHED; i++) {
+ M_32_SWAP(hdrp->spares[i]);
+ M_16_SWAP(hdrp->bitmaps[i]);
+ }
+}
+#endif
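
The bucket-selection step used throughout hash.c (__call_hash() together with the masks maintained by __expand_table()) is classic linear hashing: a key's hash is first masked with HIGH_MASK, and if that picks a bucket beyond MAX_BUCKET (one that has not yet been split in the current doubling), it is re-masked with LOW_MASK. A self-contained sketch of just that step, using made-up mask values for a table part-way through doubling from 4 to 8 buckets:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustration only: a table that has split buckets 0 and 1 of an
     * original 4, so MAX_BUCKET = 5, LOW_MASK = 3, HIGH_MASK = 7. */
    static uint32_t select_bucket(uint32_t hash, uint32_t max_bucket,
        uint32_t high_mask, uint32_t low_mask)
    {
    	uint32_t bucket = hash & high_mask;

    	/* Bucket does not exist yet: the key still lives in its
    	 * pre-split bucket, selected by the smaller mask. */
    	if (bucket > max_bucket)
    		bucket &= low_mask;
    	return bucket;
    }

    int main(void)
    {
    	uint32_t h;

    	for (h = 0; h < 8; h++)
    		printf("hash %u -> bucket %u\n",
    		    (unsigned)h, (unsigned)select_bucket(h, 5, 7, 3));
    	return 0;
    }
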
diff --git a/db1-ast/hash/hash.h b/db1-ast/hash/hash.h
new file mode 100755
index 000000000..d07db6f07
--- /dev/null
+++ b/db1-ast/hash/hash.h
@@ -0,0 +1,293 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)hash.h 8.3 (Berkeley) 5/31/94
+ */
+
+/* Operations */
+typedef enum {
+ HASH_GET, HASH_PUT, HASH_PUTNEW, HASH_DELETE, HASH_FIRST, HASH_NEXT
+} ACTION;
+
+/* Buffer Management structures */
+typedef struct _bufhead BUFHEAD;
+
+struct _bufhead {
+ BUFHEAD *prev; /* LRU links */
+ BUFHEAD *next; /* LRU links */
+ BUFHEAD *ovfl; /* Overflow page buffer header */
+ u_int32_t addr; /* Address of this page */
+ char *page; /* Actual page data */
+ char flags;
+#define BUF_MOD 0x0001
+#define BUF_DISK 0x0002
+#define BUF_BUCKET 0x0004
+#define BUF_PIN 0x0008
+};
+
+#define IS_BUCKET(X) ((X) & BUF_BUCKET)
+
+typedef BUFHEAD **SEGMENT;
+
+/* Hash Table Information */
+typedef struct hashhdr { /* Disk resident portion */
+ int magic; /* Magic NO for hash tables */
+ int version; /* Version ID */
+ u_int32_t lorder; /* Byte Order */
+ int bsize; /* Bucket/Page Size */
+ int bshift; /* Bucket shift */
+ int dsize; /* Directory Size */
+ int ssize; /* Segment Size */
+ int sshift; /* Segment shift */
+ int ovfl_point; /* Where overflow pages are being
+ * allocated */
+ int last_freed; /* Last overflow page freed */
+ int max_bucket; /* ID of Maximum bucket in use */
+ int high_mask; /* Mask to modulo into entire table */
+ int low_mask; /* Mask to modulo into lower half of
+ * table */
+ int ffactor; /* Fill factor */
+ int nkeys; /* Number of keys in hash table */
+ int hdrpages; /* Size of table header */
+ int h_charkey; /* value of hash(CHARKEY) */
+#define NCACHED 32 /* number of bit maps and spare
+ * points */
+ int spares[NCACHED];/* spare pages for overflow */
+ u_int16_t bitmaps[NCACHED]; /* address of overflow page
+ * bitmaps */
+} HASHHDR;
+
+typedef struct htab { /* Memory resident data structure */
+ HASHHDR hdr; /* Header */
+ int nsegs; /* Number of allocated segments */
+ int exsegs; /* Number of extra allocated
+ * segments */
+ u_int32_t /* Hash function */
+ (*hash)__P((const void *, size_t));
+ int flags; /* Flag values */
+ int fp; /* File pointer */
+ char *tmp_buf; /* Temporary Buffer for BIG data */
+ char *tmp_key; /* Temporary Buffer for BIG keys */
+ BUFHEAD *cpage; /* Current page */
+ int cbucket; /* Current bucket */
+ int cndx; /* Index of next item on cpage */
+ int errnum; /* Error Number -- for DBM
+ * compatibility */
+ int new_file; /* Indicates if fd is backing store
+ * or not */
+ int save_file; /* Indicates whether we need to flush
+ * file at
+ * exit */
+ u_int32_t *mapp[NCACHED]; /* Pointers to page maps */
+ int nmaps; /* Initial number of bitmaps */
+ int nbufs; /* Number of buffers left to
+ * allocate */
+ BUFHEAD bufhead; /* Header of buffer lru list */
+ SEGMENT *dir; /* Hash Bucket directory */
+} HTAB;
+
+/*
+ * Constants
+ */
+#define MAX_BSIZE 65536 /* 2^16 */
+#define MIN_BUFFERS 6
+#define MINHDRSIZE 512
+#define DEF_BUFSIZE 65536 /* 64 K */
+#define DEF_BUCKET_SIZE 4096
+#define DEF_BUCKET_SHIFT 12 /* log2(BUCKET) */
+#define DEF_SEGSIZE 256
+#define DEF_SEGSIZE_SHIFT 8 /* log2(SEGSIZE) */
+#define DEF_DIRSIZE 256
+#define DEF_FFACTOR 65536
+#define MIN_FFACTOR 4
+#define SPLTMAX 8
+#define CHARKEY "%$sniglet^&"
+#define NUMKEY 1038583
+#define BYTE_SHIFT 3
+#define INT_TO_BYTE 2
+#define INT_BYTE_SHIFT 5
+#define ALL_SET ((u_int32_t)0xFFFFFFFF)
+#define ALL_CLEAR 0
+
+#define PTROF(X) ((BUFHEAD *)((ptrdiff_t)(X)&~0x3))
+#define ISMOD(X) ((u_int32_t)(ptrdiff_t)(X)&0x1)
+#define DOMOD(X) ((X) = (char *)((ptrdiff_t)(X)|0x1))
+#define ISDISK(X) ((u_int32_t)(ptrdiff_t)(X)&0x2)
+#define DODISK(X) ((X) = (char *)((ptrdiff_t)(X)|0x2))
+
+#define BITS_PER_MAP 32
+
+/* Given the address of the beginning of a big map, clear/set the nth bit */
+#define CLRBIT(A, N) ((A)[(N)/BITS_PER_MAP] &= ~(1<<((N)%BITS_PER_MAP)))
+#define SETBIT(A, N) ((A)[(N)/BITS_PER_MAP] |= (1<<((N)%BITS_PER_MAP)))
+#define ISSET(A, N) ((A)[(N)/BITS_PER_MAP] & (1<<((N)%BITS_PER_MAP)))
+
+/* Overflow management */
+/*
+ * Overflow page numbers are allocated per split point. At each doubling of
+ * the table, we can allocate extra pages. So, in an overflow page number
+ * the top 5 bits indicate the split point and the lower 11 bits indicate
+ * which page at that split point is meant (pages within a split point are
+ * numbered starting with 1).
+ */
+
+#define SPLITSHIFT 11
+#define SPLITMASK 0x7FF
+#define SPLITNUM(N) (((u_int32_t)(N)) >> SPLITSHIFT)
+#define OPAGENUM(N) ((N) & SPLITMASK)
+#define OADDR_OF(S,O) ((u_int32_t)((u_int32_t)(S) << SPLITSHIFT) + (O))
+
+#define BUCKET_TO_PAGE(B) \
+ (B) + hashp->HDRPAGES + ((B) ? hashp->SPARES[__hash_log2((B)+1)-1] : 0)
+#define OADDR_TO_PAGE(B) \
+ BUCKET_TO_PAGE ( (1 << SPLITNUM((B))) -1 ) + OPAGENUM((B));
+
+/*
+ * page.h contains a detailed description of the page format.
+ *
+ * Normally, keys and data are accessed from offset tables in the top of
+ * each page which point to the beginning of the key and data. There are
+ * four flag values which may be stored in these offset tables which indicate
+ * the following:
+ *
+ *
+ * OVFLPAGE Rather than a key data pair, this pair contains
+ * the address of an overflow page. The format of
+ * the pair is:
+ * OVERFLOW_PAGE_NUMBER OVFLPAGE
+ *
+ * PARTIAL_KEY This must be the first key/data pair on a page
+ * and implies that page contains only a partial key.
+ * That is, the key is too big to fit on a single page
+ * so it starts on this page and continues on the next.
+ * The format of the page is:
+ * KEY_OFF PARTIAL_KEY OVFL_PAGENO OVFLPAGE
+ *
+ * KEY_OFF -- offset of the beginning of the key
+ * PARTIAL_KEY -- 1
+ * OVFL_PAGENO - page number of the next overflow page
+ * OVFLPAGE -- 0
+ *
+ * FULL_KEY This must be the first key/data pair on the page. It
+ * is used in two cases.
+ *
+ * Case 1:
+ * There is a complete key on the page but no data
+ * (because it wouldn't fit). The next page contains
+ * the data.
+ *
+ * Page format is:
+ * KEY_OFF FULL_KEY OVFL_PAGENO OVFL_PAGE
+ *
+ * KEY_OFF -- offset of the beginning of the key
+ * FULL_KEY -- 2
+ * OVFL_PAGENO - page number of the next overflow page
+ * OVFLPAGE -- 0
+ *
+ * Case 2:
+ * This page contains no key, but part of a large
+ * data field, which is continued on the next page.
+ *
+ * Page format is:
+ * DATA_OFF FULL_KEY OVFL_PAGENO OVFL_PAGE
+ *
+ * DATA_OFF -- offset of the beginning of the data on
+ * this page
+ * FULL_KEY -- 2
+ * OVFL_PAGENO - page number of the next overflow page
+ * OVFLPAGE -- 0
+ *
+ * FULL_KEY_DATA
+ * This must be the first key/data pair on the page.
+ * There are two cases:
+ *
+ * Case 1:
+ * This page contains a key and the beginning of the
+ * data field, but the data field is continued on the
+ * next page.
+ *
+ * Page format is:
+ * KEY_OFF FULL_KEY_DATA OVFL_PAGENO DATA_OFF
+ *
+ * KEY_OFF -- offset of the beginning of the key
+ * FULL_KEY_DATA -- 3
+ * OVFL_PAGENO - page number of the next overflow page
+ * DATA_OFF -- offset of the beginning of the data
+ *
+ * Case 2:
+ * This page contains the last page of a big data pair.
+ * There is no key, only the tail end of the data
+ * on this page.
+ *
+ * Page format is:
+ * DATA_OFF FULL_KEY_DATA <OVFL_PAGENO> <OVFLPAGE>
+ *
+ * DATA_OFF -- offset of the beginning of the data on
+ * this page
+ * FULL_KEY_DATA -- 3
+ * OVFL_PAGENO - page number of the next overflow page
+ * OVFLPAGE -- 0
+ *
+ * OVFL_PAGENO and OVFLPAGE are optional (they are
+ * not present if there is no next page).
+ */
+
+#define OVFLPAGE 0
+#define PARTIAL_KEY 1
+#define FULL_KEY 2
+#define FULL_KEY_DATA 3
+#define REAL_KEY 4
+
+/* Short hands for accessing structure */
+#define BSIZE hdr.bsize
+#define BSHIFT hdr.bshift
+#define DSIZE hdr.dsize
+#define SGSIZE hdr.ssize
+#define SSHIFT hdr.sshift
+#define LORDER hdr.lorder
+#define OVFL_POINT hdr.ovfl_point
+#define LAST_FREED hdr.last_freed
+#define MAX_BUCKET hdr.max_bucket
+#define FFACTOR hdr.ffactor
+#define HIGH_MASK hdr.high_mask
+#define LOW_MASK hdr.low_mask
+#define NKEYS hdr.nkeys
+#define HDRPAGES hdr.hdrpages
+#define SPARES hdr.spares
+#define BITMAPS hdr.bitmaps
+#define VERSION hdr.version
+#define MAGIC hdr.magic
+#define NEXT_FREE hdr.next_free
+#define H_CHARKEY hdr.h_charkey
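
The overflow-addressing comment in hash.h above keeps the split point in the upper bits of a 16-bit overflow address and a 1-based page number in the low SPLITSHIFT (11) bits. A brief sketch of that encoding, with the macro bodies taken from hash.h (u_int32_t written as the standard uint32_t here) and arbitrary sample values:

    #include <stdio.h>
    #include <stdint.h>

    /* From hash.h: split point in the high bits, page number in the
     * low 11 bits (pages are numbered from 1 within a split point). */
    #define SPLITSHIFT	11
    #define SPLITMASK	0x7FF
    #define SPLITNUM(N)	(((uint32_t)(N)) >> SPLITSHIFT)
    #define OPAGENUM(N)	((N) & SPLITMASK)
    #define OADDR_OF(S, O)	((uint32_t)((uint32_t)(S) << SPLITSHIFT) + (O))

    int main(void)
    {
    	/* Arbitrary example: the third overflow page at split point 5. */
    	uint32_t oaddr = OADDR_OF(5, 3);

    	printf("oaddr = %#x, split point = %u, page = %u\n",
    	    (unsigned)oaddr, (unsigned)SPLITNUM(oaddr),
    	    (unsigned)OPAGENUM(oaddr));
    	return 0;
    }
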
diff --git a/db1-ast/hash/hash_bigkey.c b/db1-ast/hash/hash_bigkey.c
new file mode 100755
index 000000000..94c64083f
--- /dev/null
+++ b/db1-ast/hash/hash_bigkey.c
@@ -0,0 +1,668 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash_bigkey.c 8.3 (Berkeley) 5/31/94";
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * PACKAGE: hash
+ * DESCRIPTION:
+ * Big key/data handling for the hashing package.
+ *
+ * ROUTINES:
+ * External
+ * __big_keydata
+ * __big_split
+ * __big_insert
+ * __big_return
+ * __big_delete
+ * __find_last_page
+ * Internal
+ * collect_key
+ * collect_data
+ */
+
+#include <sys/param.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef DEBUG
+#include <assert.h>
+#endif
+
+#include <db.h>
+#include "hash.h"
+#include "page.h"
+#include "extern.h"
+
+static int collect_key __P((HTAB *, BUFHEAD *, int, DBT *, int));
+static int collect_data __P((HTAB *, BUFHEAD *, int, int));
+
+/*
+ * Big_insert
+ *
+ * You need to do an insert and the key/data pair is too big
+ *
+ * Returns:
+ * 0 ==> OK
+ * -1 ==> ERROR
+ */
+extern int
+__big_insert(hashp, bufp, key, val)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ const DBT *key, *val;
+{
+ register u_int16_t *p;
+ int key_size, n, val_size;
+ u_int16_t space, move_bytes, off;
+ char *cp, *key_data, *val_data;
+
+ cp = bufp->page; /* Character pointer of p. */
+ p = (u_int16_t *)cp;
+
+ key_data = (char *)key->data;
+ key_size = key->size;
+ val_data = (char *)val->data;
+ val_size = val->size;
+
+ /* First move the Key */
+ for (space = FREESPACE(p) - BIGOVERHEAD; key_size;
+ space = FREESPACE(p) - BIGOVERHEAD) {
+ move_bytes = MIN(space, key_size);
+ off = OFFSET(p) - move_bytes;
+ memmove(cp + off, key_data, move_bytes);
+ key_size -= move_bytes;
+ key_data += move_bytes;
+ n = p[0];
+ p[++n] = off;
+ p[0] = ++n;
+ FREESPACE(p) = off - PAGE_META(n);
+ OFFSET(p) = off;
+ p[n] = PARTIAL_KEY;
+ bufp = __add_ovflpage(hashp, bufp);
+ if (!bufp)
+ return (-1);
+ n = p[0];
+ if (!key_size) {
+ if (FREESPACE(p)) {
+ move_bytes = MIN(FREESPACE(p), val_size);
+ off = OFFSET(p) - move_bytes;
+ p[n] = off;
+ memmove(cp + off, val_data, move_bytes);
+ val_data += move_bytes;
+ val_size -= move_bytes;
+ p[n - 2] = FULL_KEY_DATA;
+ FREESPACE(p) = FREESPACE(p) - move_bytes;
+ OFFSET(p) = off;
+ } else
+ p[n - 2] = FULL_KEY;
+ }
+ p = (u_int16_t *)bufp->page;
+ cp = bufp->page;
+ bufp->flags |= BUF_MOD;
+ }
+
+ /* Now move the data */
+ for (space = FREESPACE(p) - BIGOVERHEAD; val_size;
+ space = FREESPACE(p) - BIGOVERHEAD) {
+ move_bytes = MIN(space, val_size);
+ /*
+ * Here's the hack to make sure that if the data ends on the
+ * same page as the key ends, FREESPACE is at least one.
+ */
+ if ((int) space == val_size && (size_t) val_size == val->size)
+ move_bytes--;
+ off = OFFSET(p) - move_bytes;
+ memmove(cp + off, val_data, move_bytes);
+ val_size -= move_bytes;
+ val_data += move_bytes;
+ n = p[0];
+ p[++n] = off;
+ p[0] = ++n;
+ FREESPACE(p) = off - PAGE_META(n);
+ OFFSET(p) = off;
+ if (val_size) {
+ p[n] = FULL_KEY;
+ bufp = __add_ovflpage(hashp, bufp);
+ if (!bufp)
+ return (-1);
+ cp = bufp->page;
+ p = (u_int16_t *)cp;
+ } else
+ p[n] = FULL_KEY_DATA;
+ bufp->flags |= BUF_MOD;
+ }
+ return (0);
+}
+
+/*
+ * Called when bufp's page contains a partial key (index should be 1)
+ *
+ * All pages in the big key/data pair except bufp are freed. We cannot
+ * free bufp because the page pointing to it is lost and we can't get rid
+ * of its pointer.
+ *
+ * Returns:
+ * 0 => OK
+ * -1 => ERROR
+ */
+extern int
+__big_delete(hashp, bufp)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+{
+ register BUFHEAD *last_bfp, *rbufp;
+ u_int16_t *bp, pageno;
+ int key_done, n;
+
+ rbufp = bufp;
+ last_bfp = NULL;
+ bp = (u_int16_t *)bufp->page;
+ pageno = 0;
+ key_done = 0;
+
+ while (!key_done || (bp[2] != FULL_KEY_DATA)) {
+ if (bp[2] == FULL_KEY || bp[2] == FULL_KEY_DATA)
+ key_done = 1;
+
+ /*
+ * If there is freespace left on a FULL_KEY_DATA page, then
+ * the data is short and fits entirely on this page, and this
+ * is the last page.
+ */
+ if (bp[2] == FULL_KEY_DATA && FREESPACE(bp))
+ break;
+ pageno = bp[bp[0] - 1];
+ rbufp->flags |= BUF_MOD;
+ rbufp = __get_buf(hashp, pageno, rbufp, 0);
+ if (last_bfp)
+ __free_ovflpage(hashp, last_bfp);
+ last_bfp = rbufp;
+ if (!rbufp)
+ return (-1); /* Error. */
+ bp = (u_int16_t *)rbufp->page;
+ }
+
+ /*
+ * If we get here then rbufp points to the last page of the big
+ * key/data pair. Bufp points to the first one -- it should now be
+ * empty pointing to the next page after this pair. Can't free it
+ * because we don't have the page pointing to it.
+ */
+
+ /* This is information from the last page of the pair. */
+ n = bp[0];
+ pageno = bp[n - 1];
+
+ /* Now, bp is the first page of the pair. */
+ bp = (u_int16_t *)bufp->page;
+ if (n > 2) {
+ /* There is an overflow page. */
+ bp[1] = pageno;
+ bp[2] = OVFLPAGE;
+ bufp->ovfl = rbufp->ovfl;
+ } else
+ /* This is the last page. */
+ bufp->ovfl = NULL;
+ n -= 2;
+ bp[0] = n;
+ FREESPACE(bp) = hashp->BSIZE - PAGE_META(n);
+ OFFSET(bp) = hashp->BSIZE - 1;
+
+ bufp->flags |= BUF_MOD;
+ if (rbufp)
+ __free_ovflpage(hashp, rbufp);
+ if (last_bfp && last_bfp != rbufp)
+ __free_ovflpage(hashp, last_bfp);
+
+ hashp->NKEYS--;
+ return (0);
+}
+/*
+ * Returns:
+ * 0 = key not found
+ * -1 = get next overflow page
+ * -2 means key not found and this is big key/data
+ * -3 error
+ */
+extern int
+__find_bigpair(hashp, bufp, ndx, key, size)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ int ndx;
+ char *key;
+ int size;
+{
+ register u_int16_t *bp;
+ register char *p;
+ int ksize;
+ u_int16_t bytes;
+ char *kkey;
+
+ bp = (u_int16_t *)bufp->page;
+ p = bufp->page;
+ ksize = size;
+ kkey = key;
+
+ for (bytes = hashp->BSIZE - bp[ndx];
+ bytes <= size && bp[ndx + 1] == PARTIAL_KEY;
+ bytes = hashp->BSIZE - bp[ndx]) {
+ if (memcmp(p + bp[ndx], kkey, bytes))
+ return (-2);
+ kkey += bytes;
+ ksize -= bytes;
+ bufp = __get_buf(hashp, bp[ndx + 2], bufp, 0);
+ if (!bufp)
+ return (-3);
+ p = bufp->page;
+ bp = (u_int16_t *)p;
+ ndx = 1;
+ }
+
+ if (bytes != ksize || memcmp(p + bp[ndx], kkey, bytes)) {
+#ifdef HASH_STATISTICS
+ ++hash_collisions;
+#endif
+ return (-2);
+ } else
+ return (ndx);
+}
+
+/*
+ * Given the buffer pointer of the first overflow page of a big pair,
+ * find the end of the big pair
+ *
+ * This will set bpp to the buffer header of the last page of the big pair.
+ * It will return the pageno of the overflow page following the last page
+ * of the pair; 0 if there isn't any (i.e. big pair is the last key in the
+ * bucket)
+ */
+extern u_int16_t
+__find_last_page(hashp, bpp)
+ HTAB *hashp;
+ BUFHEAD **bpp;
+{
+ BUFHEAD *bufp;
+ u_int16_t *bp, pageno;
+ int n;
+
+ bufp = *bpp;
+ bp = (u_int16_t *)bufp->page;
+ for (;;) {
+ n = bp[0];
+
+ /*
+ * This is the last page if: the tag is FULL_KEY_DATA and
+ * either there are only 2 entries, the OVFLPAGE marker is
+ * explicit, or there is free space on the page.
+ */
+ if (bp[2] == FULL_KEY_DATA &&
+ ((n == 2) || (bp[n] == OVFLPAGE) || (FREESPACE(bp))))
+ break;
+
+ pageno = bp[n - 1];
+ bufp = __get_buf(hashp, pageno, bufp, 0);
+ if (!bufp)
+ return (0); /* Need to indicate an error! */
+ bp = (u_int16_t *)bufp->page;
+ }
+
+ *bpp = bufp;
+ if (bp[0] > 2)
+ return (bp[3]);
+ else
+ return (0);
+}
+
+/*
+ * Return the data for the key/data pair that begins on this page at this
+ * index (index should always be 1).
+ */
+extern int
+__big_return(hashp, bufp, ndx, val, set_current)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ int ndx;
+ DBT *val;
+ int set_current;
+{
+ BUFHEAD *save_p;
+ u_int16_t *bp, len, off, save_addr;
+ char *tp;
+
+ bp = (u_int16_t *)bufp->page;
+ while (bp[ndx + 1] == PARTIAL_KEY) {
+ bufp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!bufp)
+ return (-1);
+ bp = (u_int16_t *)bufp->page;
+ ndx = 1;
+ }
+
+ if (bp[ndx + 1] == FULL_KEY) {
+ bufp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!bufp)
+ return (-1);
+ bp = (u_int16_t *)bufp->page;
+ save_p = bufp;
+ save_addr = save_p->addr;
+ off = bp[1];
+ len = 0;
+ } else
+ if (!FREESPACE(bp)) {
+ /*
+ * This is a hack. We can't distinguish between
+ * FULL_KEY_DATA that contains complete data or
+ * incomplete data, so we require that if the data
+ * is complete, there is at least 1 byte of free
+ * space left.
+ */
+ off = bp[bp[0]];
+ len = bp[1] - off;
+ save_p = bufp;
+ save_addr = bufp->addr;
+ bufp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!bufp)
+ return (-1);
+ bp = (u_int16_t *)bufp->page;
+ } else {
+ /* The data is all on one page. */
+ tp = (char *)bp;
+ off = bp[bp[0]];
+ val->data = (u_char *)tp + off;
+ val->size = bp[1] - off;
+ if (set_current) {
+ if (bp[0] == 2) { /* No more buckets in
+ * chain */
+ hashp->cpage = NULL;
+ hashp->cbucket++;
+ hashp->cndx = 1;
+ } else {
+ hashp->cpage = __get_buf(hashp,
+ bp[bp[0] - 1], bufp, 0);
+ if (!hashp->cpage)
+ return (-1);
+ hashp->cndx = 1;
+ if (!((u_int16_t *)
+ hashp->cpage->page)[0]) {
+ hashp->cbucket++;
+ hashp->cpage = NULL;
+ }
+ }
+ }
+ return (0);
+ }
+
+ val->size = collect_data(hashp, bufp, (int)len, set_current);
+ if (val->size == (size_t) -1)
+ return (-1);
+ if (save_p->addr != save_addr) {
+ /* We are pretty short on buffers. */
+ errno = EINVAL; /* OUT OF BUFFERS */
+ return (-1);
+ }
+ memmove(hashp->tmp_buf, (save_p->page) + off, len);
+ val->data = (u_char *)hashp->tmp_buf;
+ return (0);
+}
+/*
+ * Count how big the total datasize is by recursing through the pages. Then
+ * allocate a buffer and copy the data as you recurse up.
+ */
+static int
+collect_data(hashp, bufp, len, set)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ int len, set;
+{
+ register u_int16_t *bp;
+ register char *p;
+ BUFHEAD *xbp;
+ u_int16_t save_addr;
+ int mylen, totlen;
+
+ p = bufp->page;
+ bp = (u_int16_t *)p;
+ mylen = hashp->BSIZE - bp[1];
+ save_addr = bufp->addr;
+
+ if (bp[2] == FULL_KEY_DATA) { /* End of Data */
+ totlen = len + mylen;
+ if (hashp->tmp_buf)
+ free(hashp->tmp_buf);
+ if ((hashp->tmp_buf = (char *)malloc(totlen)) == NULL)
+ return (-1);
+ if (set) {
+ hashp->cndx = 1;
+ if (bp[0] == 2) { /* No more buckets in chain */
+ hashp->cpage = NULL;
+ hashp->cbucket++;
+ } else {
+ hashp->cpage =
+ __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!hashp->cpage)
+ return (-1);
+ else if (!((u_int16_t *)hashp->cpage->page)[0]) {
+ hashp->cbucket++;
+ hashp->cpage = NULL;
+ }
+ }
+ }
+ } else {
+ xbp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!xbp || ((totlen =
+ collect_data(hashp, xbp, len + mylen, set)) < 1))
+ return (-1);
+ }
+ if (bufp->addr != save_addr) {
+ errno = EINVAL; /* Out of buffers. */
+ return (-1);
+ }
+ memmove(&hashp->tmp_buf[len], (bufp->page) + bp[1], mylen);
+ return (totlen);
+}
+
+/*
+ * Fill in the key and data for this big pair.
+ */
+extern int
+__big_keydata(hashp, bufp, key, val, set)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ DBT *key, *val;
+ int set;
+{
+ key->size = collect_key(hashp, bufp, 0, val, set);
+ if (key->size == (size_t) -1)
+ return (-1);
+ key->data = (u_char *)hashp->tmp_key;
+ return (0);
+}
+
+/*
+ * Count how big the total key size is by recursing through the pages. Then
+ * collect the data, allocate a buffer and copy the key as you recurse up.
+ */
+static int
+collect_key(hashp, bufp, len, val, set)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ int len;
+ DBT *val;
+ int set;
+{
+ BUFHEAD *xbp;
+ char *p;
+ int mylen, totlen;
+ u_int16_t *bp, save_addr;
+
+ p = bufp->page;
+ bp = (u_int16_t *)p;
+ mylen = hashp->BSIZE - bp[1];
+
+ save_addr = bufp->addr;
+ totlen = len + mylen;
+ if (bp[2] == FULL_KEY || bp[2] == FULL_KEY_DATA) { /* End of Key. */
+ if (hashp->tmp_key != NULL)
+ free(hashp->tmp_key);
+ if ((hashp->tmp_key = (char *)malloc(totlen)) == NULL)
+ return (-1);
+ if (__big_return(hashp, bufp, 1, val, set))
+ return (-1);
+ } else {
+ xbp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!xbp || ((totlen =
+ collect_key(hashp, xbp, totlen, val, set)) < 1))
+ return (-1);
+ }
+ if (bufp->addr != save_addr) {
+ errno = EINVAL; /* MIS -- OUT OF BUFFERS */
+ return (-1);
+ }
+ memmove(&hashp->tmp_key[len], (bufp->page) + bp[1], mylen);
+ return (totlen);
+}
+
+/*
+ * Returns:
+ * 0 => OK
+ * -1 => error
+ */
+extern int
+__big_split(hashp, op, np, big_keyp, addr, obucket, ret)
+ HTAB *hashp;
+ BUFHEAD *op; /* Pointer to where to put keys that go in old bucket */
+ BUFHEAD *np; /* Pointer to new bucket page */
+ /* Pointer to first page containing the big key/data */
+ BUFHEAD *big_keyp;
+ int addr; /* Address of big_keyp */
+ u_int32_t obucket;/* Old Bucket */
+ SPLIT_RETURN *ret;
+{
+ register BUFHEAD *tmpp;
+ register u_int16_t *tp;
+ BUFHEAD *bp;
+ DBT key, val;
+ u_int32_t change;
+ u_int16_t free_space, n, off;
+
+ bp = big_keyp;
+
+ /* Now figure out where the big key/data goes */
+ if (__big_keydata(hashp, big_keyp, &key, &val, 0))
+ return (-1);
+ change = (__call_hash(hashp, key.data, key.size) != obucket);
+
+ if ((ret->next_addr = __find_last_page(hashp, &big_keyp))) {
+ if (!(ret->nextp =
+ __get_buf(hashp, ret->next_addr, big_keyp, 0)))
+ return (-1);
+ } else
+ ret->nextp = NULL;
+
+ /* Now make one of np/op point to the big key/data pair */
+#ifdef DEBUG
+ assert(np->ovfl == NULL);
+#endif
+ if (change)
+ tmpp = np;
+ else
+ tmpp = op;
+
+ tmpp->flags |= BUF_MOD;
+#ifdef DEBUG1
+ (void)fprintf(stderr,
+ "BIG_SPLIT: %d->ovfl was %d is now %d\n", tmpp->addr,
+ (tmpp->ovfl ? tmpp->ovfl->addr : 0), (bp ? bp->addr : 0));
+#endif
+ tmpp->ovfl = bp; /* one of op/np point to big_keyp */
+ tp = (u_int16_t *)tmpp->page;
+#ifdef DEBUG
+ assert(FREESPACE(tp) >= OVFLSIZE);
+#endif
+ n = tp[0];
+ off = OFFSET(tp);
+ free_space = FREESPACE(tp);
+ tp[++n] = (u_int16_t)addr;
+ tp[++n] = OVFLPAGE;
+ tp[0] = n;
+ OFFSET(tp) = off;
+ FREESPACE(tp) = free_space - OVFLSIZE;
+
+ /*
+ * Finally, set the new and old return values. BIG_KEYP contains a
+ * pointer to the last page of the big key/data pair. Make sure that
+ * big_keyp has no following page (2 elements) or create an empty
+ * following page.
+ */
+
+ ret->newp = np;
+ ret->oldp = op;
+
+ tp = (u_int16_t *)big_keyp->page;
+ big_keyp->flags |= BUF_MOD;
+ if (tp[0] > 2) {
+ /*
+ * There may be either one or two offsets on this page. If
+ * there is one, then the overflow page is linked on normally
+ * and tp[4] is OVFLPAGE. If there are two, tp[4] contains
+ * the second offset and needs to get stuffed in after the
+ * next overflow page is added.
+ */
+ n = tp[4];
+ free_space = FREESPACE(tp);
+ off = OFFSET(tp);
+ tp[0] -= 2;
+ FREESPACE(tp) = free_space + OVFLSIZE;
+ OFFSET(tp) = off;
+ tmpp = __add_ovflpage(hashp, big_keyp);
+ if (!tmpp)
+ return (-1);
+ tp[4] = n;
+ } else
+ tmpp = big_keyp;
+
+ if (change)
+ ret->newp = tmpp;
+ else
+ ret->oldp = tmpp;
+ return (0);
+}
diff --git a/db1-ast/hash/hash_buf.c b/db1-ast/hash/hash_buf.c
new file mode 100755
index 000000000..92e1f933a
--- /dev/null
+++ b/db1-ast/hash/hash_buf.c
@@ -0,0 +1,355 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash_buf.c 8.5 (Berkeley) 7/15/94";
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * PACKAGE: hash
+ *
+ * DESCRIPTION:
+ * Contains buffer management
+ *
+ * ROUTINES:
+ * External
+ * __buf_init
+ * __get_buf
+ * __buf_free
+ * __reclaim_buf
+ * Internal
+ * newbuf
+ */
+
+#include <sys/param.h>
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef DEBUG
+#include <assert.h>
+#endif
+
+#include <db.h>
+#include "hash.h"
+#include "page.h"
+#include "extern.h"
+
+static BUFHEAD *newbuf __P((HTAB *, u_int32_t, BUFHEAD *));
+
+/* Unlink B from its place in the lru */
+#define BUF_REMOVE(B) { \
+ (B)->prev->next = (B)->next; \
+ (B)->next->prev = (B)->prev; \
+}
+
+/* Insert B after P */
+#define BUF_INSERT(B, P) { \
+ (B)->next = (P)->next; \
+ (B)->prev = (P); \
+ (P)->next = (B); \
+ (B)->next->prev = (B); \
+}
+
+#define MRU hashp->bufhead.next
+#define LRU hashp->bufhead.prev
+
+#define MRU_INSERT(B) BUF_INSERT((B), &hashp->bufhead)
+#define LRU_INSERT(B) BUF_INSERT((B), LRU)
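
The four macros above maintain the buffer cache as a circular doubly-linked list threaded through hashp->bufhead: the sentinel's next pointer is the most recently used buffer and its prev pointer the least recently used, so eviction starts from LRU. The standalone sketch below (hypothetical node type and names, not part of this package) shows the same unlink/relink pattern on a toy ring.

#include <stdio.h>

struct node {				/* stand-in for BUFHEAD's prev/next links */
	struct node *prev, *next;
	int id;
};

#define REMOVE(B) do {						\
	(B)->prev->next = (B)->next;				\
	(B)->next->prev = (B)->prev;				\
} while (0)

#define INSERT_AFTER(B, P) do {					\
	(B)->next = (P)->next;					\
	(B)->prev = (P);					\
	(P)->next = (B);					\
	(B)->next->prev = (B);					\
} while (0)

int
main(void)
{
	struct node head, a, b, *p;

	head.prev = head.next = &head;	/* empty ring: sentinel points at itself */
	a.id = 1;
	b.id = 2;
	INSERT_AFTER(&a, &head);	/* a is MRU (head.next) */
	INSERT_AFTER(&b, &head);	/* b is MRU, a drops to LRU (head.prev) */
	REMOVE(&a);			/* "touch" a ... */
	INSERT_AFTER(&a, &head);	/* ... and move it back to the MRU end */
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", p->id);	/* prints "1 2": MRU to LRU order */
	printf("\n");
	return (0);
}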
+
+/*
+ * We are looking for a buffer with address "addr". If prev_bp is NULL, then
+ * address is a bucket index. If prev_bp is not NULL, then it points to the
+ * page previous to an overflow page that we are trying to find.
+ *
+ * CAVEAT: The buffer header accessed via prev_bp's ovfl field may no longer
+ * be valid. Therefore, you must always verify that its address matches the
+ * address you are seeking.
+ */
+extern BUFHEAD *
+__get_buf(hashp, addr, prev_bp, newpage)
+ HTAB *hashp;
+ u_int32_t addr;
+ BUFHEAD *prev_bp;
+ int newpage; /* If prev_bp set, indicates a new overflow page. */
+{
+ register BUFHEAD *bp;
+ register u_int32_t is_disk_mask;
+ register int is_disk, segment_ndx;
+ SEGMENT segp;
+
+ is_disk = 0;
+ is_disk_mask = 0;
+ if (prev_bp) {
+ bp = prev_bp->ovfl;
+ if (!bp || (bp->addr != addr))
+ bp = NULL;
+ if (!newpage)
+ is_disk = BUF_DISK;
+ } else {
+ /* Grab buffer out of directory */
+ segment_ndx = addr & (hashp->SGSIZE - 1);
+
+ /* valid segment ensured by __call_hash() */
+ segp = hashp->dir[addr >> hashp->SSHIFT];
+#ifdef DEBUG
+ assert(segp != NULL);
+#endif
+ bp = PTROF(segp[segment_ndx]);
+ is_disk_mask = ISDISK(segp[segment_ndx]);
+ is_disk = is_disk_mask || !hashp->new_file;
+ }
+
+ if (!bp) {
+ bp = newbuf(hashp, addr, prev_bp);
+ if (!bp ||
+ __get_page(hashp, bp->page, addr, !prev_bp, is_disk, 0))
+ return (NULL);
+ if (!prev_bp)
+ segp[segment_ndx] =
+ (BUFHEAD *)((ptrdiff_t)bp | is_disk_mask);
+ } else {
+ BUF_REMOVE(bp);
+ MRU_INSERT(bp);
+ }
+ return (bp);
+}
+
+/*
+ * We need a buffer for this page. Either allocate one, or evict a resident
+ * one (if we have as many buffers as we're allowed) and put this one in.
+ *
+ * If newbuf finds an error (returning NULL), it also sets errno.
+ */
+static BUFHEAD *
+newbuf(hashp, addr, prev_bp)
+ HTAB *hashp;
+ u_int32_t addr;
+ BUFHEAD *prev_bp;
+{
+ register BUFHEAD *bp; /* The buffer we're going to use */
+ register BUFHEAD *xbp; /* Temp pointer */
+ register BUFHEAD *next_xbp;
+ SEGMENT segp;
+ int segment_ndx;
+ u_int16_t oaddr, *shortp;
+
+ oaddr = 0;
+ bp = LRU;
+ /*
+ * If the LRU buffer is pinned, the buffer pool is too small. We need to
+ * allocate more buffers.
+ */
+ if (hashp->nbufs || (bp->flags & BUF_PIN)) {
+ /* Allocate a new one */
+ if ((bp = (BUFHEAD *)malloc(sizeof(BUFHEAD))) == NULL)
+ return (NULL);
+#ifdef PURIFY
+ memset(bp, 0xff, sizeof(BUFHEAD));
+#endif
+ if ((bp->page = (char *)malloc(hashp->BSIZE)) == NULL) {
+ free(bp);
+ return (NULL);
+ }
+#ifdef PURIFY
+ memset(bp->page, 0xff, hashp->BSIZE);
+#endif
+ if (hashp->nbufs)
+ hashp->nbufs--;
+ } else {
+ /* Kick someone out */
+ BUF_REMOVE(bp);
+ /*
+ * If this is an overflow page with addr 0, it's already been
+ * flushed back in an overflow chain and initialized.
+ */
+ if ((bp->addr != 0) || (bp->flags & BUF_BUCKET)) {
+ /*
+ * Set oaddr before __put_page so that you get it
+ * before bytes are swapped.
+ */
+ shortp = (u_int16_t *)bp->page;
+ if (shortp[0])
+ oaddr = shortp[shortp[0] - 1];
+ if ((bp->flags & BUF_MOD) && __put_page(hashp, bp->page,
+ bp->addr, (int)IS_BUCKET(bp->flags), 0))
+ return (NULL);
+ /*
+ * Update the pointer to this page (i.e. invalidate it).
+ *
+ * If this is a new file (i.e. we created it at open
+ * time), make sure that we mark pages which have been
+ * written to disk so we retrieve them from disk later,
+ * rather than allocating new pages.
+ */
+ if (IS_BUCKET(bp->flags)) {
+ segment_ndx = bp->addr & (hashp->SGSIZE - 1);
+ segp = hashp->dir[bp->addr >> hashp->SSHIFT];
+#ifdef DEBUG
+ assert(segp != NULL);
+#endif
+
+ if (hashp->new_file &&
+ ((bp->flags & BUF_MOD) ||
+ ISDISK(segp[segment_ndx])))
+ segp[segment_ndx] = (BUFHEAD *)BUF_DISK;
+ else
+ segp[segment_ndx] = NULL;
+ }
+ /*
+ * Since overflow pages can only be accessed by means of
+ * their bucket, free overflow pages associated with
+ * this bucket.
+ */
+ for (xbp = bp; xbp->ovfl;) {
+ next_xbp = xbp->ovfl;
+ xbp->ovfl = 0;
+ xbp = next_xbp;
+
+ /* Check that the ovfl pointer is up to date. */
+ if (IS_BUCKET(xbp->flags) ||
+ (oaddr != xbp->addr))
+ break;
+
+ shortp = (u_int16_t *)xbp->page;
+ if (shortp[0])
+ /* set before __put_page */
+ oaddr = shortp[shortp[0] - 1];
+ if ((xbp->flags & BUF_MOD) && __put_page(hashp,
+ xbp->page, xbp->addr, 0, 0))
+ return (NULL);
+ xbp->addr = 0;
+ xbp->flags = 0;
+ BUF_REMOVE(xbp);
+ LRU_INSERT(xbp);
+ }
+ }
+ }
+
+ /* Now assign this buffer */
+ bp->addr = addr;
+#ifdef DEBUG1
+ (void)fprintf(stderr, "NEWBUF1: %d->ovfl was %d is now %d\n",
+ bp->addr, (bp->ovfl ? bp->ovfl->addr : 0), 0);
+#endif
+ bp->ovfl = NULL;
+ if (prev_bp) {
+ /*
+ * If prev_bp is set, this is an overflow page, hook it in to
+ * the buffer overflow links.
+ */
+#ifdef DEBUG1
+ (void)fprintf(stderr, "NEWBUF2: %d->ovfl was %d is now %d\n",
+ prev_bp->addr, (prev_bp->ovfl ? bp->ovfl->addr : 0),
+ (bp ? bp->addr : 0));
+#endif
+ prev_bp->ovfl = bp;
+ bp->flags = 0;
+ } else
+ bp->flags = BUF_BUCKET;
+ MRU_INSERT(bp);
+ return (bp);
+}
+
+extern void
+__buf_init(hashp, nbytes)
+ HTAB *hashp;
+ int nbytes;
+{
+ BUFHEAD *bfp;
+ int npages;
+
+ bfp = &(hashp->bufhead);
+ npages = (nbytes + hashp->BSIZE - 1) >> hashp->BSHIFT;
+ npages = MAX(npages, MIN_BUFFERS);
+
+ hashp->nbufs = npages;
+ bfp->next = bfp;
+ bfp->prev = bfp;
+ /*
+ * This space is calloc'd so these are already null.
+ *
+ * bfp->ovfl = NULL;
+ * bfp->flags = 0;
+ * bfp->page = NULL;
+ * bfp->addr = 0;
+ */
+}
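
__buf_init rounds the requested cache size up to whole pages: (nbytes + BSIZE - 1) >> BSHIFT is ceiling division when BSIZE == 1 << BSHIFT, and the result is then raised to at least MIN_BUFFERS (a constant from hash.h, not shown in this diff). A small arithmetic illustration with assumed values:

#include <stdio.h>

int
main(void)
{
	int bshift = 12;			/* assumed: BSIZE == 1 << BSHIFT */
	int bsize = 1 << bshift;		/* 4096-byte buckets, for example */
	int nbytes = 65537;			/* caller asks for ~64K of cache */
	int npages = (nbytes + bsize - 1) >> bshift;

	/* 65536 bytes would need exactly 16 pages; 65537 spills into a 17th. */
	printf("npages = %d\n", npages);	/* prints 17 */
	return (0);
}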
+
+extern int
+__buf_free(hashp, do_free, to_disk)
+ HTAB *hashp;
+ int do_free, to_disk;
+{
+ BUFHEAD *bp;
+
+ /* Need to make sure that buffer manager has been initialized */
+ if (!LRU)
+ return (0);
+ for (bp = LRU; bp != &hashp->bufhead;) {
+ /* Check that the buffer is valid */
+ if (bp->addr || IS_BUCKET(bp->flags)) {
+ if (to_disk && (bp->flags & BUF_MOD) &&
+ __put_page(hashp, bp->page,
+ bp->addr, IS_BUCKET(bp->flags), 0))
+ return (-1);
+ }
+ /* Check if we are freeing stuff */
+ if (do_free) {
+ if (bp->page)
+ free(bp->page);
+ BUF_REMOVE(bp);
+ free(bp);
+ bp = LRU;
+ } else
+ bp = bp->prev;
+ }
+ return (0);
+}
+
+extern void
+__reclaim_buf(hashp, bp)
+ HTAB *hashp;
+ BUFHEAD *bp;
+{
+ bp->ovfl = 0;
+ bp->addr = 0;
+ bp->flags = 0;
+ BUF_REMOVE(bp);
+ LRU_INSERT(bp);
+}
diff --git a/db1-ast/hash/hash_func.c b/db1-ast/hash/hash_func.c
new file mode 100755
index 000000000..a5ec434ee
--- /dev/null
+++ b/db1-ast/hash/hash_func.c
@@ -0,0 +1,212 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash_func.c 8.2 (Berkeley) 2/21/94";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+
+#include <db.h>
+#include "hash.h"
+#include "page.h"
+#include "extern.h"
+
+static u_int32_t hash1 __P((const void *, size_t));
+static u_int32_t hash2 __P((const void *, size_t));
+static u_int32_t hash3 __P((const void *, size_t));
+static u_int32_t hash4 __P((const void *, size_t));
+
+/* Global default hash function */
+u_int32_t (*__default_hash) __P((const void *, size_t)) = hash4;
+
+/*
+ * HASH FUNCTIONS
+ *
+ * Assume that we've already split the bucket to which this key hashes,
+ * calculate that bucket, and check that in fact we did already split it.
+ *
+ * This came from ejb's hsearch.
+ */
+
+#define PRIME1 37
+#define PRIME2 1048583
+
+static u_int32_t
+hash1(keyarg, len)
+ const void *keyarg;
+ register size_t len;
+{
+ register const u_char *key;
+ register u_int32_t h;
+
+ /* Convert string to integer */
+ for (key = keyarg, h = 0; len--;)
+ h = h * PRIME1 ^ (*key++ - ' ');
+ h %= PRIME2;
+ return (h);
+}
+
+/*
+ * Phong's linear congruential hash
+ */
+#define dcharhash(h, c) ((h) = 0x63c63cd9*(h) + 0x9c39c33d + (c))
+
+static u_int32_t
+hash2(keyarg, len)
+ const void *keyarg;
+ size_t len;
+{
+ register const u_char *e, *key;
+ register u_int32_t h;
+ register u_char c;
+
+ key = keyarg;
+ e = key + len;
+ for (h = 0; key != e;) {
+ c = *key++;
+ if (!c && key > e)
+ break;
+ dcharhash(h, c);
+ }
+ return (h);
+}
+
+/*
+ * This is INCREDIBLY ugly, but fast. We break the string up into 8 byte
+ * units. On the first time through the loop we get the "leftover bytes"
+ * (strlen % 8). On every other iteration, we perform 8 HASHC's so we handle
+ * all 8 bytes. Essentially, this saves us 7 cmp & branch instructions. If
+ * this routine is heavily used enough, it's worth the ugly coding.
+ *
+ * OZ's original sdbm hash
+ */
+static u_int32_t
+hash3(keyarg, len)
+ const void *keyarg;
+ register size_t len;
+{
+ register const u_char *key;
+ register size_t loop;
+ register u_int32_t h;
+
+#define HASHC h = *key++ + 65599 * h
+
+ h = 0;
+ key = keyarg;
+ if (len > 0) {
+ loop = (len + 8 - 1) >> 3;
+
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASHC;
+ /* FALLTHROUGH */
+ case 7:
+ HASHC;
+ /* FALLTHROUGH */
+ case 6:
+ HASHC;
+ /* FALLTHROUGH */
+ case 5:
+ HASHC;
+ /* FALLTHROUGH */
+ case 4:
+ HASHC;
+ /* FALLTHROUGH */
+ case 3:
+ HASHC;
+ /* FALLTHROUGH */
+ case 2:
+ HASHC;
+ /* FALLTHROUGH */
+ case 1:
+ HASHC;
+ } while (--loop);
+ }
+ }
+ return (h);
+}
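
hash3 above is the classic sdbm recurrence h = *key + 65599 * h, unrolled eight ways with a Duff's-device style switch; the unrolling only removes loop overhead and does not change the result, since the bytes are still consumed in order. A plain rolled version (illustrative only, not part of the package) computes the same hash:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Rolled equivalent of hash3: same recurrence, one byte per iteration. */
static u_int32_t
sdbm_hash_simple(const void *keyarg, size_t len)
{
	const unsigned char *key = keyarg;
	u_int32_t h;

	for (h = 0; len--;)
		h = *key++ + 65599 * h;
	return (h);
}

int
main(void)
{
	const char *s = "example key";

	printf("%lu\n", (unsigned long)sdbm_hash_simple(s, strlen(s)));
	return (0);
}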
+
+/* Hash function from Chris Torek. */
+static u_int32_t
+hash4(keyarg, len)
+ const void *keyarg;
+ register size_t len;
+{
+ register const u_char *key;
+ register size_t loop;
+ register u_int32_t h;
+
+#define HASH4a h = (h << 5) - h + *key++;
+#define HASH4b h = (h << 5) + h + *key++;
+#define HASH4 HASH4b
+
+ h = 0;
+ key = keyarg;
+ if (len > 0) {
+ loop = (len + 8 - 1) >> 3;
+
+ switch (len & (8 - 1)) {
+ case 0:
+ do {
+ HASH4;
+ /* FALLTHROUGH */
+ case 7:
+ HASH4;
+ /* FALLTHROUGH */
+ case 6:
+ HASH4;
+ /* FALLTHROUGH */
+ case 5:
+ HASH4;
+ /* FALLTHROUGH */
+ case 4:
+ HASH4;
+ /* FALLTHROUGH */
+ case 3:
+ HASH4;
+ /* FALLTHROUGH */
+ case 2:
+ HASH4;
+ /* FALLTHROUGH */
+ case 1:
+ HASH4;
+ } while (--loop);
+ }
+ }
+ return (h);
+}
diff --git a/db1-ast/hash/hash_log2.c b/db1-ast/hash/hash_log2.c
new file mode 100755
index 000000000..6bcf9c114
--- /dev/null
+++ b/db1-ast/hash/hash_log2.c
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash_log2.c 8.2 (Berkeley) 5/31/94";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+
+#include <db.h>
+
+u_int32_t __hash_log2 __P((u_int32_t));
+
+u_int32_t
+__hash_log2(num)
+ u_int32_t num;
+{
+ register u_int32_t i, limit;
+
+ limit = 1;
+ for (i = 0; limit < num; limit = limit << 1, i++);
+ return (i);
+}
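
__hash_log2(num) returns the smallest i such that (1 << i) >= num, i.e. the ceiling of log2(num), and 0 for num <= 1. A few spot checks against a standalone restatement of the same loop (illustration only):

#include <assert.h>
#include <sys/types.h>

/* Standalone restatement of the loop above, for spot-checking values. */
static u_int32_t
log2_ceil(u_int32_t num)
{
	u_int32_t i, limit;

	for (i = 0, limit = 1; limit < num; limit <<= 1, i++);
	return (i);
}

int
main(void)
{
	assert(log2_ceil(1) == 0);
	assert(log2_ceil(2) == 1);
	assert(log2_ceil(3) == 2);
	assert(log2_ceil(1000) == 10);	/* 1 << 10 == 1024 >= 1000 */
	return (0);
}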
diff --git a/db1-ast/hash/hash_page.c b/db1-ast/hash/hash_page.c
new file mode 100755
index 000000000..e1dfe6b8d
--- /dev/null
+++ b/db1-ast/hash/hash_page.c
@@ -0,0 +1,944 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hash_page.c 8.7 (Berkeley) 8/16/94";
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * PACKAGE: hashing
+ *
+ * DESCRIPTION:
+ * Page manipulation for hashing package.
+ *
+ * ROUTINES:
+ *
+ * External
+ * __get_page
+ * __add_ovflpage
+ * Internal
+ * overflow_page
+ * open_temp
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#ifdef DEBUG
+#include <assert.h>
+#endif
+
+#include <db.h>
+#include "hash.h"
+#include "page.h"
+#include "extern.h"
+
+static u_int32_t *fetch_bitmap __P((HTAB *, int));
+static u_int32_t first_free __P((u_int32_t));
+static int open_temp __P((HTAB *));
+static u_int16_t overflow_page __P((HTAB *));
+static void putpair __P((char *, const DBT *, const DBT *));
+static void squeeze_key __P((u_int16_t *, const DBT *, const DBT *));
+static int ugly_split
+ __P((HTAB *, u_int32_t, BUFHEAD *, BUFHEAD *, int, int));
+
+#define PAGE_INIT(P) { \
+ ((u_int16_t *)(P))[0] = 0; \
+ ((u_int16_t *)(P))[1] = hashp->BSIZE - 3 * sizeof(u_int16_t); \
+ ((u_int16_t *)(P))[2] = hashp->BSIZE; \
+}
+
+/*
+ * This is called AFTER we have verified that there is room on the page for
+ * the pair (PAIRFITS has returned true) so we go right ahead and start moving
+ * stuff on.
+ */
+static void
+putpair(p, key, val)
+ char *p;
+ const DBT *key, *val;
+{
+ register u_int16_t *bp, n, off;
+
+ bp = (u_int16_t *)p;
+
+ /* Enter the key first. */
+ n = bp[0];
+
+ off = OFFSET(bp) - key->size;
+ memmove(p + off, key->data, key->size);
+ bp[++n] = off;
+
+ /* Now the data. */
+ off -= val->size;
+ memmove(p + off, val->data, val->size);
+ bp[++n] = off;
+
+ /* Adjust page info. */
+ bp[0] = n;
+ bp[n + 1] = off - ((n + 3) * sizeof(u_int16_t));
+ bp[n + 2] = off;
+}
+
+/*
+ * Returns:
+ * 0 OK
+ * -1 error
+ */
+extern int
+__delpair(hashp, bufp, ndx)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ register int ndx;
+{
+ register u_int16_t *bp, newoff;
+ register int n;
+ u_int16_t pairlen;
+
+ bp = (u_int16_t *)bufp->page;
+ n = bp[0];
+
+ if (bp[ndx + 1] < REAL_KEY)
+ return (__big_delete(hashp, bufp));
+ if (ndx != 1)
+ newoff = bp[ndx - 1];
+ else
+ newoff = hashp->BSIZE;
+ pairlen = newoff - bp[ndx + 1];
+
+ if (ndx != (n - 1)) {
+ /* Hard Case -- need to shuffle keys */
+ register int i;
+ register char *src = bufp->page + (int)OFFSET(bp);
+ register char *dst = src + (int)pairlen;
+ memmove(dst, src, bp[ndx + 1] - OFFSET(bp));
+
+ /* Now adjust the pointers */
+ for (i = ndx + 2; i <= n; i += 2) {
+ if (bp[i + 1] == OVFLPAGE) {
+ bp[i - 2] = bp[i];
+ bp[i - 1] = bp[i + 1];
+ } else {
+ bp[i - 2] = bp[i] + pairlen;
+ bp[i - 1] = bp[i + 1] + pairlen;
+ }
+ }
+ }
+ /* Finally adjust the page data */
+ bp[n] = OFFSET(bp) + pairlen;
+ bp[n - 1] = bp[n + 1] + pairlen + 2 * sizeof(u_int16_t);
+ bp[0] = n - 2;
+ hashp->NKEYS--;
+
+ bufp->flags |= BUF_MOD;
+ return (0);
+}
+/*
+ * Returns:
+ * 0 ==> OK
+ * -1 ==> Error
+ */
+extern int
+__split_page(hashp, obucket, nbucket)
+ HTAB *hashp;
+ u_int32_t obucket, nbucket;
+{
+ register BUFHEAD *new_bufp, *old_bufp;
+ register u_int16_t *ino;
+ register char *np;
+ DBT key, val;
+ int n, ndx, retval;
+ u_int16_t copyto, diff, off, moved;
+ char *op;
+
+ copyto = (u_int16_t)hashp->BSIZE;
+ off = (u_int16_t)hashp->BSIZE;
+ old_bufp = __get_buf(hashp, obucket, NULL, 0);
+ if (old_bufp == NULL)
+ return (-1);
+ new_bufp = __get_buf(hashp, nbucket, NULL, 0);
+ if (new_bufp == NULL)
+ return (-1);
+
+ old_bufp->flags |= (BUF_MOD | BUF_PIN);
+ new_bufp->flags |= (BUF_MOD | BUF_PIN);
+
+ ino = (u_int16_t *)(op = old_bufp->page);
+ np = new_bufp->page;
+
+ moved = 0;
+
+ for (n = 1, ndx = 1; n < ino[0]; n += 2) {
+ if (ino[n + 1] < REAL_KEY) {
+ retval = ugly_split(hashp, obucket, old_bufp, new_bufp,
+ (int)copyto, (int)moved);
+ old_bufp->flags &= ~BUF_PIN;
+ new_bufp->flags &= ~BUF_PIN;
+ return (retval);
+
+ }
+ key.data = (u_char *)op + ino[n];
+ key.size = off - ino[n];
+
+ if (__call_hash(hashp, key.data, key.size) == obucket) {
+ /* Don't switch page */
+ diff = copyto - off;
+ if (diff) {
+ copyto = ino[n + 1] + diff;
+ memmove(op + copyto, op + ino[n + 1],
+ off - ino[n + 1]);
+ ino[ndx] = copyto + ino[n] - ino[n + 1];
+ ino[ndx + 1] = copyto;
+ } else
+ copyto = ino[n + 1];
+ ndx += 2;
+ } else {
+ /* Switch page */
+ val.data = (u_char *)op + ino[n + 1];
+ val.size = ino[n] - ino[n + 1];
+ putpair(np, &key, &val);
+ moved += 2;
+ }
+
+ off = ino[n + 1];
+ }
+
+ /* Now clean up the page */
+ ino[0] -= moved;
+ FREESPACE(ino) = copyto - sizeof(u_int16_t) * (ino[0] + 3);
+ OFFSET(ino) = copyto;
+
+#ifdef DEBUG3
+ (void)fprintf(stderr, "split %d/%d\n",
+ ((u_int16_t *)np)[0] / 2,
+ ((u_int16_t *)op)[0] / 2);
+#endif
+ /* unpin both pages */
+ old_bufp->flags &= ~BUF_PIN;
+ new_bufp->flags &= ~BUF_PIN;
+ return (0);
+}
+
+/*
+ * Called when we encounter an overflow or big key/data page during split
+ * handling. This is special cased since we have to begin checking whether
+ * the key/data pairs fit on their respective pages and because we may need
+ * overflow pages for both the old and new pages.
+ *
+ * The first page might be a page with regular key/data pairs, in which case
+ * we have a regular overflow condition and just need to go on to the next
+ * page, or it might be a big key/data pair, in which case we need to fix the
+ * big key/data pair.
+ *
+ * Returns:
+ * 0 ==> success
+ * -1 ==> failure
+ */
+static int
+ugly_split(hashp, obucket, old_bufp, new_bufp, copyto, moved)
+ HTAB *hashp;
+ u_int32_t obucket; /* Same as __split_page. */
+ BUFHEAD *old_bufp, *new_bufp;
+ int copyto; /* First byte on page which contains key/data values. */
+ int moved; /* Number of pairs moved to new page. */
+{
+ register BUFHEAD *bufp; /* Buffer header for ino */
+ register u_int16_t *ino; /* Page keys come off of */
+ register u_int16_t *np; /* New page */
+ register u_int16_t *op; /* Page keys go on to if they aren't moving */
+
+ BUFHEAD *last_bfp; /* Last buf header OVFL needing to be freed */
+ DBT key, val;
+ SPLIT_RETURN ret;
+ u_int16_t n, off, ov_addr, scopyto;
+ char *cino; /* Character value of ino */
+
+ bufp = old_bufp;
+ ino = (u_int16_t *)old_bufp->page;
+ np = (u_int16_t *)new_bufp->page;
+ op = (u_int16_t *)old_bufp->page;
+ last_bfp = NULL;
+ scopyto = (u_int16_t)copyto; /* ANSI */
+
+ n = ino[0] - 1;
+ while (n < ino[0]) {
+ if (ino[2] < REAL_KEY && ino[2] != OVFLPAGE) {
+ if (__big_split(hashp, old_bufp,
+ new_bufp, bufp, bufp->addr, obucket, &ret))
+ return (-1);
+ old_bufp = ret.oldp;
+ if (!old_bufp)
+ return (-1);
+ op = (u_int16_t *)old_bufp->page;
+ new_bufp = ret.newp;
+ if (!new_bufp)
+ return (-1);
+ np = (u_int16_t *)new_bufp->page;
+ bufp = ret.nextp;
+ if (!bufp)
+ return (0);
+ cino = (char *)bufp->page;
+ ino = (u_int16_t *)cino;
+ last_bfp = ret.nextp;
+ } else if (ino[n + 1] == OVFLPAGE) {
+ ov_addr = ino[n];
+ /*
+ * Fix up the old page -- the extra 2 are the fields
+ * which contained the overflow information.
+ */
+ ino[0] -= (moved + 2);
+ FREESPACE(ino) =
+ scopyto - sizeof(u_int16_t) * (ino[0] + 3);
+ OFFSET(ino) = scopyto;
+
+ bufp = __get_buf(hashp, ov_addr, bufp, 0);
+ if (!bufp)
+ return (-1);
+
+ ino = (u_int16_t *)bufp->page;
+ n = 1;
+ scopyto = hashp->BSIZE;
+ moved = 0;
+
+ if (last_bfp)
+ __free_ovflpage(hashp, last_bfp);
+ last_bfp = bufp;
+ }
+ /* Move regular sized pairs if there are any */
+ off = hashp->BSIZE;
+ for (n = 1; (n < ino[0]) && (ino[n + 1] >= REAL_KEY); n += 2) {
+ cino = (char *)ino;
+ key.data = (u_char *)cino + ino[n];
+ key.size = off - ino[n];
+ val.data = (u_char *)cino + ino[n + 1];
+ val.size = ino[n] - ino[n + 1];
+ off = ino[n + 1];
+
+ if (__call_hash(hashp, key.data, key.size) == obucket) {
+ /* Keep on old page */
+ if (PAIRFITS(op, (&key), (&val)))
+ putpair((char *)op, &key, &val);
+ else {
+ old_bufp =
+ __add_ovflpage(hashp, old_bufp);
+ if (!old_bufp)
+ return (-1);
+ op = (u_int16_t *)old_bufp->page;
+ putpair((char *)op, &key, &val);
+ }
+ old_bufp->flags |= BUF_MOD;
+ } else {
+ /* Move to new page */
+ if (PAIRFITS(np, (&key), (&val)))
+ putpair((char *)np, &key, &val);
+ else {
+ new_bufp =
+ __add_ovflpage(hashp, new_bufp);
+ if (!new_bufp)
+ return (-1);
+ np = (u_int16_t *)new_bufp->page;
+ putpair((char *)np, &key, &val);
+ }
+ new_bufp->flags |= BUF_MOD;
+ }
+ }
+ }
+ if (last_bfp)
+ __free_ovflpage(hashp, last_bfp);
+ return (0);
+}
+
+/*
+ * Add the given pair to the page
+ *
+ * Returns:
+ * 0 ==> OK
+ * -1 ==> failure
+ */
+extern int
+__addel(hashp, bufp, key, val)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+ const DBT *key, *val;
+{
+ register u_int16_t *bp, *sop;
+ int do_expand;
+
+ bp = (u_int16_t *)bufp->page;
+ do_expand = 0;
+ while (bp[0] && (bp[2] < REAL_KEY || bp[bp[0]] < REAL_KEY))
+ /* Exception case */
+ if (bp[2] == FULL_KEY_DATA && bp[0] == 2)
+ /* This is the last page of a big key/data pair
+ and we need to add another page */
+ break;
+ else if (bp[2] < REAL_KEY && bp[bp[0]] != OVFLPAGE) {
+ bufp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!bufp)
+ return (-1);
+ bp = (u_int16_t *)bufp->page;
+ } else
+ /* Try to squeeze key on this page */
+ if (FREESPACE(bp) > PAIRSIZE(key, val)) {
+ squeeze_key(bp, key, val);
+ return (0);
+ } else {
+ bufp = __get_buf(hashp, bp[bp[0] - 1], bufp, 0);
+ if (!bufp)
+ return (-1);
+ bp = (u_int16_t *)bufp->page;
+ }
+
+ if (PAIRFITS(bp, key, val))
+ putpair(bufp->page, key, val);
+ else {
+ do_expand = 1;
+ bufp = __add_ovflpage(hashp, bufp);
+ if (!bufp)
+ return (-1);
+ sop = (u_int16_t *)bufp->page;
+
+ if (PAIRFITS(sop, key, val))
+ putpair((char *)sop, key, val);
+ else
+ if (__big_insert(hashp, bufp, key, val))
+ return (-1);
+ }
+ bufp->flags |= BUF_MOD;
+ /*
+ * If the average number of keys per bucket exceeds the fill factor,
+ * expand the table.
+ */
+ hashp->NKEYS++;
+ if (do_expand ||
+ (hashp->NKEYS / (hashp->MAX_BUCKET + 1) > hashp->FFACTOR))
+ return (__expand_table(hashp));
+ return (0);
+}
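
The test at the end of __addel splits the table once the average chain length, NKEYS divided by the current number of buckets (MAX_BUCKET + 1), exceeds FFACTOR. A worked example with made-up numbers (not the package's defaults), just to show the arithmetic:

#include <stdio.h>

int
main(void)
{
	int nkeys = 325;		/* illustrative values */
	int max_bucket = 3;		/* buckets 0..3, i.e. 4 buckets in use */
	int ffactor = 80;
	int avg = nkeys / (max_bucket + 1);	/* 325 / 4 == 81 */

	if (avg > ffactor)
		printf("expand: average %d keys/bucket > ffactor %d\n",
		    avg, ffactor);
	else
		printf("no expansion yet\n");
	return (0);
}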
+
+/*
+ *
+ * Returns:
+ * pointer on success
+ * NULL on error
+ */
+extern BUFHEAD *
+__add_ovflpage(hashp, bufp)
+ HTAB *hashp;
+ BUFHEAD *bufp;
+{
+ register u_int16_t *sp;
+ u_int16_t ndx, ovfl_num;
+#ifdef DEBUG1
+ int tmp1, tmp2;
+#endif
+ sp = (u_int16_t *)bufp->page;
+
+ /* Check if we are dynamically determining the fill factor */
+ if (hashp->FFACTOR == DEF_FFACTOR) {
+ hashp->FFACTOR = sp[0] >> 1;
+ if (hashp->FFACTOR < MIN_FFACTOR)
+ hashp->FFACTOR = MIN_FFACTOR;
+ }
+ bufp->flags |= BUF_MOD;
+ ovfl_num = overflow_page(hashp);
+#ifdef DEBUG1
+ tmp1 = bufp->addr;
+ tmp2 = bufp->ovfl ? bufp->ovfl->addr : 0;
+#endif
+ if (!ovfl_num || !(bufp->ovfl = __get_buf(hashp, ovfl_num, bufp, 1)))
+ return (NULL);
+ bufp->ovfl->flags |= BUF_MOD;
+#ifdef DEBUG1
+ (void)fprintf(stderr, "ADDOVFLPAGE: %d->ovfl was %d is now %d\n",
+ tmp1, tmp2, bufp->ovfl->addr);
+#endif
+ ndx = sp[0];
+ /*
+ * Since a pair is allocated on a page only if there's room to add
+ * an overflow page, we know that the OVFL information will fit on
+ * the page.
+ */
+ sp[ndx + 4] = OFFSET(sp);
+ sp[ndx + 3] = FREESPACE(sp) - OVFLSIZE;
+ sp[ndx + 1] = ovfl_num;
+ sp[ndx + 2] = OVFLPAGE;
+ sp[0] = ndx + 2;
+#ifdef HASH_STATISTICS
+ hash_overflows++;
+#endif
+ return (bufp->ovfl);
+}
+
+/*
+ * Returns:
+ * 0 indicates SUCCESS
+ * -1 indicates FAILURE
+ */
+extern int
+__get_page(hashp, p, bucket, is_bucket, is_disk, is_bitmap)
+ HTAB *hashp;
+ char *p;
+ u_int32_t bucket;
+ int is_bucket, is_disk, is_bitmap;
+{
+ register int fd, page, size;
+ int rsize;
+ u_int16_t *bp;
+
+ fd = hashp->fp;
+ size = hashp->BSIZE;
+
+ if ((fd == -1) || !is_disk) {
+ PAGE_INIT(p);
+ return (0);
+ }
+ if (is_bucket)
+ page = BUCKET_TO_PAGE(bucket);
+ else
+ page = OADDR_TO_PAGE(bucket);
+ if ((lseek(fd, (off_t)page << hashp->BSHIFT, SEEK_SET) == -1) ||
+ ((rsize = read(fd, p, size)) == -1))
+ return (-1);
+ bp = (u_int16_t *)p;
+ if (!rsize)
+ bp[0] = 0; /* We hit the EOF, so initialize a new page */
+ else
+ if (rsize != size) {
+ errno = EFTYPE;
+ return (-1);
+ }
+ if (!is_bitmap && !bp[0]) {
+ PAGE_INIT(p);
+ } else
+ if (hashp->LORDER != BYTE_ORDER) {
+ register int i, max;
+
+ if (is_bitmap) {
+ max = hashp->BSIZE >> 2; /* divide by 4 */
+ for (i = 0; i < max; i++)
+ M_32_SWAP(((int *)p)[i]);
+ } else {
+ M_16_SWAP(bp[0]);
+ max = bp[0] + 2;
+ for (i = 1; i <= max; i++)
+ M_16_SWAP(bp[i]);
+ }
+ }
+ return (0);
+}
+
+/*
+ * Write page p to disk
+ *
+ * Returns:
+ * 0 ==> OK
+ * -1 ==> failure
+ */
+extern int
+__put_page(hashp, p, bucket, is_bucket, is_bitmap)
+ HTAB *hashp;
+ char *p;
+ u_int32_t bucket;
+ int is_bucket, is_bitmap;
+{
+ register int fd, page, size;
+ int wsize;
+
+ size = hashp->BSIZE;
+ if ((hashp->fp == -1) && open_temp(hashp))
+ return (-1);
+ fd = hashp->fp;
+
+ if (hashp->LORDER != BYTE_ORDER) {
+ register int i;
+ register int max;
+
+ if (is_bitmap) {
+ max = hashp->BSIZE >> 2; /* divide by 4 */
+ for (i = 0; i < max; i++)
+ M_32_SWAP(((int *)p)[i]);
+ } else {
+ max = ((u_int16_t *)p)[0] + 2;
+ for (i = 0; i <= max; i++)
+ M_16_SWAP(((u_int16_t *)p)[i]);
+ }
+ }
+ if (is_bucket)
+ page = BUCKET_TO_PAGE(bucket);
+ else
+ page = OADDR_TO_PAGE(bucket);
+ if ((lseek(fd, (off_t)page << hashp->BSHIFT, SEEK_SET) == -1) ||
+ ((wsize = write(fd, p, size)) == -1))
+ /* Errno is set */
+ return (-1);
+ if (wsize != size) {
+ errno = EFTYPE;
+ return (-1);
+ }
+ return (0);
+}
+
+#define BYTE_MASK ((1 << INT_BYTE_SHIFT) -1)
+/*
+ * Initialize a new bitmap page. Bitmap pages are left in memory
+ * once they are read in.
+ */
+extern int
+__ibitmap(hashp, pnum, nbits, ndx)
+ HTAB *hashp;
+ int pnum, nbits, ndx;
+{
+ u_int32_t *ip;
+ int clearbytes, clearints;
+
+ if ((ip = (u_int32_t *)malloc(hashp->BSIZE)) == NULL)
+ return (1);
+ hashp->nmaps++;
+ clearints = ((nbits - 1) >> INT_BYTE_SHIFT) + 1;
+ clearbytes = clearints << INT_TO_BYTE;
+ (void)memset((char *)ip, 0, clearbytes);
+ (void)memset(((char *)ip) + clearbytes, 0xFF,
+ hashp->BSIZE - clearbytes);
+ ip[clearints - 1] = ALL_SET << (nbits & BYTE_MASK);
+ SETBIT(ip, 0);
+ hashp->BITMAPS[ndx] = (u_int16_t)pnum;
+ hashp->mapp[ndx] = ip;
+ return (0);
+}
+
+static u_int32_t
+first_free(map)
+ u_int32_t map;
+{
+ register u_int32_t i, mask;
+
+ mask = 0x1;
+ for (i = 0; i < BITS_PER_MAP; i++) {
+ if (!(mask & map))
+ return (i);
+ mask = mask << 1;
+ }
+ return (i);
+}
+
+static u_int16_t
+overflow_page(hashp)
+ HTAB *hashp;
+{
+ register u_int32_t *freep;
+ register int max_free, offset, splitnum;
+ u_int16_t addr;
+ int bit, first_page, free_bit, free_page, i, in_use_bits, j;
+#ifdef DEBUG2
+ int tmp1, tmp2;
+#endif
+ splitnum = hashp->OVFL_POINT;
+ max_free = hashp->SPARES[splitnum];
+
+ free_page = (max_free - 1) >> (hashp->BSHIFT + BYTE_SHIFT);
+ free_bit = (max_free - 1) & ((hashp->BSIZE << BYTE_SHIFT) - 1);
+
+ /* Look through all the free maps to find the first free block */
+ first_page = hashp->LAST_FREED >> (hashp->BSHIFT + BYTE_SHIFT);
+ for (i = first_page; i <= free_page; i++) {
+ if (!(freep = (u_int32_t *)hashp->mapp[i]) &&
+ !(freep = fetch_bitmap(hashp, i)))
+ return (0);
+ if (i == free_page)
+ in_use_bits = free_bit;
+ else
+ in_use_bits = (hashp->BSIZE << BYTE_SHIFT) - 1;
+
+ if (i == first_page) {
+ bit = hashp->LAST_FREED &
+ ((hashp->BSIZE << BYTE_SHIFT) - 1);
+ j = bit / BITS_PER_MAP;
+ bit = bit & ~(BITS_PER_MAP - 1);
+ } else {
+ bit = 0;
+ j = 0;
+ }
+ for (; bit <= in_use_bits; j++, bit += BITS_PER_MAP)
+ if (freep[j] != ALL_SET)
+ goto found;
+ }
+
+ /* No Free Page Found */
+ hashp->LAST_FREED = hashp->SPARES[splitnum];
+ hashp->SPARES[splitnum]++;
+ offset = hashp->SPARES[splitnum] -
+ (splitnum ? hashp->SPARES[splitnum - 1] : 0);
+
+#define OVMSG "HASH: Out of overflow pages. Increase page size\n"
+ if (offset > SPLITMASK) {
+ if (++splitnum >= NCACHED) {
+ (void)write(STDERR_FILENO, OVMSG, sizeof(OVMSG) - 1);
+ return (0);
+ }
+ hashp->OVFL_POINT = splitnum;
+ hashp->SPARES[splitnum] = hashp->SPARES[splitnum-1];
+ hashp->SPARES[splitnum-1]--;
+ offset = 1;
+ }
+
+ /* Check if we need to allocate a new bitmap page */
+ if (free_bit == (hashp->BSIZE << BYTE_SHIFT) - 1) {
+ free_page++;
+ if (free_page >= NCACHED) {
+ (void)write(STDERR_FILENO, OVMSG, sizeof(OVMSG) - 1);
+ return (0);
+ }
+ /*
+ * This is tricky. The 1 indicates that you want the new page
+ * allocated with 1 clear bit. Actually, you are going to
+ * allocate 2 pages from this map. The first is going to be
+ * the map page, the second is the overflow page we were
+ * looking for. The init_bitmap routine automatically sets
+ * the first bit of the bitmap to indicate that the bitmap page
+ * itself is in use. We would explicitly set the second bit, but
+ * don't have to if we tell init_bitmap not to leave it clear
+ * in the first place.
+ */
+ if (__ibitmap(hashp,
+ (int)OADDR_OF(splitnum, offset), 1, free_page))
+ return (0);
+ hashp->SPARES[splitnum]++;
+#ifdef DEBUG2
+ free_bit = 2;
+#endif
+ offset++;
+ if (offset > SPLITMASK) {
+ if (++splitnum >= NCACHED) {
+ (void)write(STDERR_FILENO, OVMSG,
+ sizeof(OVMSG) - 1);
+ return (0);
+ }
+ hashp->OVFL_POINT = splitnum;
+ hashp->SPARES[splitnum] = hashp->SPARES[splitnum-1];
+ hashp->SPARES[splitnum-1]--;
+ offset = 0;
+ }
+ } else {
+ /*
+ * Free_bit addresses the last used bit. Bump it to address
+ * the first available bit.
+ */
+ free_bit++;
+ SETBIT(freep, free_bit);
+ }
+
+ /* Calculate address of the new overflow page */
+ addr = OADDR_OF(splitnum, offset);
+#ifdef DEBUG2
+ (void)fprintf(stderr, "OVERFLOW_PAGE: ADDR: %d BIT: %d PAGE %d\n",
+ addr, free_bit, free_page);
+#endif
+ return (addr);
+
+found:
+ bit = bit + first_free(freep[j]);
+ SETBIT(freep, bit);
+#ifdef DEBUG2
+ tmp1 = bit;
+ tmp2 = i;
+#endif
+ /*
+ * Bits are addressed starting with 0, but overflow pages are addressed
+ * beginning at 1. Bit is a bit address number, so we need to increment
+ * it to convert it to a page number.
+ */
+ bit = 1 + bit + (i * (hashp->BSIZE << BYTE_SHIFT));
+ if (bit >= hashp->LAST_FREED)
+ hashp->LAST_FREED = bit - 1;
+
+ /* Calculate the split number for this page */
+ for (i = 0; (i < splitnum) && (bit > hashp->SPARES[i]); i++);
+ offset = (i ? bit - hashp->SPARES[i - 1] : bit);
+ if (offset >= SPLITMASK)
+ return (0); /* Out of overflow pages */
+ addr = OADDR_OF(i, offset);
+#ifdef DEBUG2
+ (void)fprintf(stderr, "OVERFLOW_PAGE: ADDR: %d BIT: %d PAGE %d\n",
+ addr, tmp1, tmp2);
+#endif
+
+ /* Allocate and return the overflow page */
+ return (addr);
+}
+
+/*
+ * Mark this overflow page as free.
+ */
+extern void
+__free_ovflpage(hashp, obufp)
+ HTAB *hashp;
+ BUFHEAD *obufp;
+{
+ register u_int16_t addr;
+ u_int32_t *freep;
+ int bit_address, free_page, free_bit;
+ u_int16_t ndx;
+
+ addr = obufp->addr;
+#ifdef DEBUG1
+ (void)fprintf(stderr, "Freeing %d\n", addr);
+#endif
+ ndx = (((u_int16_t)addr) >> SPLITSHIFT);
+ bit_address =
+ (ndx ? hashp->SPARES[ndx - 1] : 0) + (addr & SPLITMASK) - 1;
+ if (bit_address < hashp->LAST_FREED)
+ hashp->LAST_FREED = bit_address;
+ free_page = (bit_address >> (hashp->BSHIFT + BYTE_SHIFT));
+ free_bit = bit_address & ((hashp->BSIZE << BYTE_SHIFT) - 1);
+
+ if (!(freep = hashp->mapp[free_page]))
+ freep = fetch_bitmap(hashp, free_page);
+#ifdef DEBUG
+ /*
+ * This had better never happen. It means we tried to read a bitmap
+ * that has already had overflow pages allocated off it, and we
+ * failed to read it from the file.
+ */
+ if (!freep)
+ assert(0);
+#endif
+ CLRBIT(freep, free_bit);
+#ifdef DEBUG2
+ (void)fprintf(stderr, "FREE_OVFLPAGE: ADDR: %d BIT: %d PAGE %d\n",
+ obufp->addr, free_bit, free_page);
+#endif
+ __reclaim_buf(hashp, obufp);
+}
+
+/*
+ * Returns:
+ * 0 success
+ * -1 failure
+ */
+static int
+open_temp(hashp)
+ HTAB *hashp;
+{
+ sigset_t set, oset;
+ static char namestr[] = "_hashXXXXXX";
+
+ /* Block signals; make sure file goes away at process exit. */
+ (void)sigfillset(&set);
+ (void)sigprocmask(SIG_BLOCK, &set, &oset);
+ if ((hashp->fp = mkstemp(namestr)) != -1) {
+ (void)unlink(namestr);
+ (void)fcntl(hashp->fp, F_SETFD, 1);
+ }
+ (void)sigprocmask(SIG_SETMASK, &oset, (sigset_t *)NULL);
+ return (hashp->fp != -1 ? 0 : -1);
+}
+
+/*
+ * We have to know that the key will fit, but the last entry on the page is
+ * an overflow pair, so we need to shift things.
+ */
+static void
+squeeze_key(sp, key, val)
+ u_int16_t *sp;
+ const DBT *key, *val;
+{
+ register char *p;
+ u_int16_t free_space, n, off, pageno;
+
+ p = (char *)sp;
+ n = sp[0];
+ free_space = FREESPACE(sp);
+ off = OFFSET(sp);
+
+ pageno = sp[n - 1];
+ off -= key->size;
+ sp[n - 1] = off;
+ memmove(p + off, key->data, key->size);
+ off -= val->size;
+ sp[n] = off;
+ memmove(p + off, val->data, val->size);
+ sp[0] = n + 2;
+ sp[n + 1] = pageno;
+ sp[n + 2] = OVFLPAGE;
+ FREESPACE(sp) = free_space - PAIRSIZE(key, val);
+ OFFSET(sp) = off;
+}
+
+static u_int32_t *
+fetch_bitmap(hashp, ndx)
+ HTAB *hashp;
+ int ndx;
+{
+ if (ndx >= hashp->nmaps)
+ return (NULL);
+ if ((hashp->mapp[ndx] = (u_int32_t *)malloc(hashp->BSIZE)) == NULL)
+ return (NULL);
+ if (__get_page(hashp,
+ (char *)hashp->mapp[ndx], hashp->BITMAPS[ndx], 0, 1, 1)) {
+ free(hashp->mapp[ndx]);
+ return (NULL);
+ }
+ return (hashp->mapp[ndx]);
+}
+
+#ifdef DEBUG4
+int
+print_chain(addr)
+ int addr;
+{
+ BUFHEAD *bufp;
+ short *bp, oaddr;
+
+ (void)fprintf(stderr, "%d ", addr);
+ bufp = __get_buf(hashp, addr, NULL, 0);
+ bp = (short *)bufp->page;
+ while (bp[0] && ((bp[bp[0]] == OVFLPAGE) ||
+ ((bp[0] > 2) && bp[2] < REAL_KEY))) {
+ oaddr = bp[bp[0] - 1];
+ (void)fprintf(stderr, "%d ", (int)oaddr);
+ bufp = __get_buf(hashp, (int)oaddr, bufp, 0);
+ bp = (short *)bufp->page;
+ }
+ (void)fprintf(stderr, "\n");
+}
+#endif
diff --git a/db1-ast/hash/hsearch.c b/db1-ast/hash/hsearch.c
new file mode 100755
index 000000000..cc8f7a4aa
--- /dev/null
+++ b/db1-ast/hash/hsearch.c
@@ -0,0 +1,107 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)hsearch.c 8.4 (Berkeley) 7/21/94";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <string.h>
+
+#include <db.h>
+#include "search.h"
+
+static DB *dbp = NULL;
+static ENTRY retval;
+
+extern int
+hcreate(nel)
+ u_int nel;
+{
+ HASHINFO info;
+
+ info.nelem = nel;
+ info.bsize = 256;
+ info.ffactor = 8;
+ info.cachesize = 0;
+ info.hash = NULL;
+ info.lorder = 0;
+ dbp = (DB *)__hash_open(NULL, O_CREAT | O_RDWR, 0600, &info, 0);
+ return ((int)dbp);
+}
+
+extern ENTRY *
+hsearch(item, action)
+ ENTRY item;
+ ACTION action;
+{
+ DBT key, val;
+ int status;
+
+ if (!dbp)
+ return (NULL);
+ key.data = (u_char *)item.key;
+ key.size = strlen(item.key) + 1;
+
+ if (action == ENTER) {
+ val.data = (u_char *)item.data;
+ val.size = strlen(item.data) + 1;
+ status = (dbp->put)(dbp, &key, &val, R_NOOVERWRITE);
+ if (status)
+ return (NULL);
+ } else {
+ /* FIND */
+ status = (dbp->get)(dbp, &key, &val, 0);
+ if (status)
+ return (NULL);
+ else
+ item.data = (char *)val.data;
+ }
+ retval.key = item.key;
+ retval.data = item.data;
+ return (&retval);
+}
+
+extern void
+hdestroy()
+{
+ if (dbp) {
+ (void)(dbp->close)(dbp);
+ dbp = NULL;
+ }
+}
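
Together, hcreate, hsearch and hdestroy above provide the System V hsearch interface backed by an in-memory hash table. A minimal usage sketch, to be built against this package (error handling trimmed; note that this implementation stores keys and data with their terminating NUL, per the strlen(...) + 1 sizing above):

#include <stdio.h>

#include <db.h>			/* provides the __P macro used by search.h */
#include "search.h"

int
main(void)
{
	ENTRY e, *found;

	if (!hcreate(30))		/* hint: roughly 30 elements */
		return (1);

	e.key = "color";
	e.data = "green";
	if (hsearch(e, ENTER) == NULL)
		return (1);

	e.key = "color";
	e.data = NULL;
	found = hsearch(e, FIND);
	if (found != NULL)
		printf("%s => %s\n", found->key, found->data);

	hdestroy();
	return (0);
}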
diff --git a/db1-ast/hash/ndbm.c b/db1-ast/hash/ndbm.c
new file mode 100755
index 000000000..83aa766c3
--- /dev/null
+++ b/db1-ast/hash/ndbm.c
@@ -0,0 +1,215 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)ndbm.c 8.4 (Berkeley) 7/21/94";
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * This package provides a dbm compatible interface to the new hashing
+ * package described in db(3).
+ */
+
+#include <sys/param.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <ndbm.h>
+#include "hash.h"
+
+/*
+ * Returns:
+ * *DBM on success
+ * NULL on failure
+ */
+extern DBM *
+dbm_open(file, flags, mode)
+ const char *file;
+ int flags, mode;
+{
+ DBM *db;
+ HASHINFO info;
+ const size_t len = strlen(file) + sizeof (DBM_SUFFIX);
+#ifdef __GNUC__
+ char path[len];
+#else
+ char *path = malloc(len);
+ if (path == NULL)
+ return NULL;
+#endif
+
+ info.bsize = 4096;
+ info.ffactor = 40;
+ info.nelem = 1;
+ info.cachesize = 0;
+ info.hash = NULL;
+ info.lorder = 0;
+ (void)strcpy(path, file);
+ (void)strcat(path, DBM_SUFFIX);
+ db = (DBM *)__hash_open(path, flags, mode, &info, 0);
+#ifndef __GNUC__
+ free(path);
+#endif
+ return db;
+}
+
+extern void
+dbm_close(db)
+ DBM *db;
+{
+ (void)(db->close)(db);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+extern datum
+dbm_fetch(db, key)
+ DBM *db;
+ datum key;
+{
+ datum retval;
+ int status;
+
+ status = (db->get)(db, (DBT *)&key, (DBT *)&retval, 0);
+ if (status) {
+ retval.dptr = NULL;
+ retval.dsize = 0;
+ }
+ return (retval);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+extern datum
+dbm_firstkey(db)
+ DBM *db;
+{
+ int status;
+ datum retdata, retkey;
+
+ status = (db->seq)(db, (DBT *)&retkey, (DBT *)&retdata, R_FIRST);
+ if (status)
+ retkey.dptr = NULL;
+ return (retkey);
+}
+
+/*
+ * Returns:
+ * DATUM on success
+ * NULL on failure
+ */
+extern datum
+dbm_nextkey(db)
+ DBM *db;
+{
+ int status;
+ datum retdata, retkey;
+
+ status = (db->seq)(db, (DBT *)&retkey, (DBT *)&retdata, R_NEXT);
+ if (status)
+ retkey.dptr = NULL;
+ return (retkey);
+}
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ */
+extern int
+dbm_delete(db, key)
+ DBM *db;
+ datum key;
+{
+ int status;
+
+ status = (db->del)(db, (DBT *)&key, 0);
+ if (status)
+ return (-1);
+ else
+ return (0);
+}
+
+/*
+ * Returns:
+ * 0 on success
+ * <0 failure
+ * 1 if DBM_INSERT and entry exists
+ */
+extern int
+dbm_store(db, key, content, flags)
+ DBM *db;
+ datum key, content;
+ int flags;
+{
+ return ((db->put)(db, (DBT *)&key, (DBT *)&content,
+ (flags == DBM_INSERT) ? R_NOOVERWRITE : 0));
+}
+
+extern int
+dbm_error(db)
+ DBM *db;
+{
+ HTAB *hp;
+
+ hp = (HTAB *)db->internal;
+ return (hp->errnum);
+}
+
+extern int
+dbm_clearerr(db)
+ DBM *db;
+{
+ HTAB *hp;
+
+ hp = (HTAB *)db->internal;
+ hp->errnum = 0;
+ return (0);
+}
+
+extern int
+dbm_dirfno(db)
+ DBM *db;
+{
+ return(((HTAB *)db->internal)->fp);
+}
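
The functions above implement the standard ndbm calling sequence over a single file; as the README notes, dbm_open appends the ".db" suffix to the name it is given. A minimal usage sketch with an illustrative file name:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <ndbm.h>

int
main(void)
{
	DBM *db;
	datum key, val, out;

	/* Creates/opens "example.db" because of the DBM_SUFFIX handling above. */
	db = dbm_open("example", O_CREAT | O_RDWR, 0600);
	if (db == NULL)
		return (1);

	key.dptr = "greeting";
	key.dsize = strlen("greeting");
	val.dptr = "hello";
	val.dsize = strlen("hello") + 1;	/* keep the NUL so fetch yields a C string */
	(void)dbm_store(db, key, val, DBM_REPLACE);

	out = dbm_fetch(db, key);
	if (out.dptr != NULL)
		printf("%s\n", (char *)out.dptr);

	dbm_close(db);
	return (0);
}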
diff --git a/db1-ast/hash/page.h b/db1-ast/hash/page.h
new file mode 100755
index 000000000..0fc0d5a3e
--- /dev/null
+++ b/db1-ast/hash/page.h
@@ -0,0 +1,92 @@
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)page.h 8.2 (Berkeley) 5/31/94
+ */
+
+/*
+ * Definitions for hashing page file format.
+ */
+
+/*
+ * routines dealing with a data page
+ *
+ * page format:
+ * +------------------------------+
+ * p | n | keyoff | datoff | keyoff |
+ * +------------+--------+--------+
+ * | datoff | free | ptr | --> |
+ * +--------+---------------------+
+ * | F R E E A R E A |
+ * +--------------+---------------+
+ * | <---- - - - | data |
+ * +--------+-----+----+----------+
+ * | key | data | key |
+ * +--------+----------+----------+
+ *
+ * Pointer to the free space is always: p[p[0] + 2]
+ * Amount of free space on the page is: p[p[0] + 1]
+ */
+
+/*
+ * How many bytes required for this pair?
+ * 2 shorts in the table at the top of the page + room for the
+ * key and room for the data
+ *
+ * We prohibit entering a pair on a page unless there is also room to append
+ * an overflow page. The reason for this is that you can get into a situation
+ * where a single key/data pair fits on a page, but you can't append an
+ * overflow page; later you'd have to split the key/data pair and handle it
+ * like a big pair.
+ * You might as well do this up front.
+ */
+
+#define PAIRSIZE(K,D) (2*sizeof(u_int16_t) + (K)->size + (D)->size)
+#define BIGOVERHEAD (4*sizeof(u_int16_t))
+#define KEYSIZE(K) (4*sizeof(u_int16_t) + (K)->size);
+#define OVFLSIZE (2*sizeof(u_int16_t))
+#define FREESPACE(P) ((P)[(P)[0]+1])
+#define OFFSET(P) ((P)[(P)[0]+2])
+#define PAIRFITS(P,K,D) \
+ (((P)[2] >= REAL_KEY) && \
+ (PAIRSIZE((K),(D)) + OVFLSIZE) <= FREESPACE((P)))
+#define PAGE_META(N) (((N)+3) * sizeof(u_int16_t))
+
+typedef struct {
+ BUFHEAD *newp;
+ BUFHEAD *oldp;
+ BUFHEAD *nextp;
+ u_int16_t next_addr;
+} SPLIT_RETURN;
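
FREESPACE and OFFSET above are simply aliases into the index array at the top of a page: with n = P[0] entries, P[n + 1] holds the count of free bytes and P[n + 2] the offset of the lowest byte in use. The standalone sketch below lays out an empty page the same way PAGE_INIT in hash_page.c does (a 4096-byte bucket is only an example value) and reads both fields back:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define FREESPACE(P)	((P)[(P)[0]+1])		/* same definitions as above */
#define OFFSET(P)	((P)[(P)[0]+2])

int
main(void)
{
	int bsize = 4096;			/* illustrative bucket size */
	u_int16_t page[4096 / sizeof(u_int16_t)];

	memset(page, 0, sizeof(page));
	/* Mirror PAGE_INIT from hash_page.c: no key/data entries yet. */
	page[0] = 0;
	page[1] = bsize - 3 * sizeof(u_int16_t);	/* free bytes */
	page[2] = bsize;				/* data grows down from the end */

	printf("entries %d, free %d, offset %d\n",
	    page[0], FREESPACE(page), OFFSET(page));	/* 0, 4090, 4096 */
	return (0);
}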
diff --git a/db1-ast/hash/search.h b/db1-ast/hash/search.h
new file mode 100755
index 000000000..4d3b9143e
--- /dev/null
+++ b/db1-ast/hash/search.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Margo Seltzer.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)search.h 8.1 (Berkeley) 6/4/93
+ */
+
+/* Backward compatibility to hsearch interface. */
+typedef struct entry {
+ char *key;
+ char *data;
+} ENTRY;
+
+typedef enum {
+ FIND, ENTER
+} ACTION;
+
+int hcreate __P((unsigned int));
+void hdestroy __P((void));
+ENTRY *hsearch __P((ENTRY, ACTION));