/*
 * Copyright (C) 1994-1999, Index Data
 * Sebastian Hammer, Adam Dickmeiss
 *
 * Revision 1.17  1999-11-30 13:48:04  adam
 * Improved installation. Updated for inclusion of YAZ header files.
 *
 * Revision 1.16  1999/05/26 07:49:14  adam
 *
 * Revision 1.15  1999/02/02 14:51:22  adam
 * Updated WIN32 code specific sections. Changed header.
 *
 * Revision 1.14  1997/09/09 13:38:12  adam
 * Partial port to WIN95/NT.
 *
 * Revision 1.13  1996/10/29 13:56:57  adam
 * Include of zebrautl.h instead of alexutil.h.
 *
 * Revision 1.12  1996/03/20 16:17:11  quinn
 *
 * Revision 1.11  1996/03/20 13:29:17  quinn
 *
 * Revision 1.10  1996/03/19 19:22:44  quinn
 *
 * Revision 1.9  1996/02/06 10:19:57  quinn
 * Attempt at fixing bug. Not all blocks were read before they were unlinked
 * prior to a remap operation.
 *
 * Revision 1.8  1996/01/29 09:47:11  quinn
 * Fixed mean little bug in the read-table code.
 *
 * Revision 1.7  1995/12/06 14:48:27  quinn
 * Fixed some strange bugs.
 *
 * Revision 1.6  1995/09/04 12:33:47  adam
 * Various cleanup. YAZ util used instead.
 *
 * Revision 1.5  1994/09/28 11:29:33  quinn
 * Added cmp parameter.
 *
 * Revision 1.4  1994/09/27 20:03:53  quinn
 * Seems relatively bug-free.
 *
 * Revision 1.3  1994/09/26 17:11:31  quinn
 *
 * Revision 1.2  1994/09/26 17:06:36  quinn
 *
 * Revision 1.1  1994/09/26 16:07:57  quinn
 * Most of the functionality in place.
 *
 * This module handles the representation of tables in the bfiles.
 */
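
/*
 * Notes (derived from the code below):
 *
 * On-disk layout: every block starts with a small header holding the
 * number of keys in the block (num_records) and the block number of the
 * next block (nextpos, 0 for the last block); the first block of a table
 * additionally holds the total number of keys in the table. The rest of
 * the block contains packed keys of is_keysize(is) bytes each.
 *
 * Free blocks are chained through a per-type free list: the head is kept
 * in is->types[type].freelist, and each free block stores the number of
 * the next free block in its first sizeof(int) bytes.
 *
 * Typical use by the callers: read blocks with is_p_read_partial() or
 * is_p_read_full(), modify the in-memory mbuf chains, then run
 * is_p_align() or is_p_remap() followed by is_p_sync() to write the
 * result back, or is_p_unmap() to release the disk blocks of the table.
 */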

static int is_freestore_alloc(ISAM is, int type)
{
    int tmp;

    if (is->types[type].freelist >= 0)
    {
        tmp = is->types[type].freelist;
        if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
            &is->types[type].freelist) <= 0)
        {
            logf (LOG_FATAL, "Failed to allocate block");
            exit(1);
        }
    }
    else
        tmp = is->types[type].top++;

    logf (LOG_DEBUG, "Allocating block #%d", tmp);
    return tmp;
}

static void is_freestore_free(ISAM is, int type, int block)
{
    int tmp;

    logf (LOG_DEBUG, "Releasing block #%d", block);
    tmp = is->types[type].freelist;
    is->types[type].freelist = block;
    if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
    {
        logf (LOG_FATAL, "Failed to deallocate block.");
        exit(1);
    }
}

/* this code must be modified to handle an index */
int is_p_read_partial(is_mtable *tab, is_mblock *block)
{
    is_mbuf *buf;
    int toread;

    assert(block->state == IS_MBSTATE_UNREAD);
    block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
    toread = tab->is->types[tab->pos_type].blocksize;
    if (toread > is_mbuf_size[buf->type])
    {
        toread = is_mbuf_size[buf->type];
        block->state = IS_MBSTATE_PARTIAL;
    }
    else
        block->state = IS_MBSTATE_CLEAN;
    if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
        buf->data) < 0)
    {
        logf (LOG_FATAL, "bfread failed.");
        return -1;
    }
    /* extract header info */
    buf->offset = 0;
    memcpy(&block->num_records, buf->data, sizeof(block->num_records));
    assert(block->num_records > 0);
    buf->offset += sizeof(block->num_records);
    memcpy(&block->nextpos, buf->data + buf->offset,
        sizeof(block->nextpos));
    buf->offset += sizeof(block->nextpos);
    if (block == tab->data) /* first block */
    {
        memcpy(&tab->num_records, buf->data + buf->offset,
            sizeof(tab->num_records));
        buf->offset += sizeof(tab->num_records);
    }
    logf(LOG_DEBUG, "R: Block #%d: num %d nextpos %d total %d",
        block->diskpos, block->num_records, block->nextpos,
        block == tab->data ? tab->num_records : -1);
    buf->num = (toread - buf->offset) / is_keysize(tab->is);
    if (buf->num >= block->num_records)
    {
        buf->num = block->num_records;
        block->state = IS_MBSTATE_CLEAN;
    }
    else
        block->bread = buf->offset + buf->num * is_keysize(tab->is);
    return 0;
}

int is_p_read_full(is_mtable *tab, is_mblock *block)
{
    is_mbuf *buf;
    int dread, toread;

    if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
    {
        logf (LOG_FATAL, "partial read failed.");
        return -1;
    }
    if (block->state == IS_MBSTATE_PARTIAL)
    {
        buf = block->data;
        dread = block->data->num;
        while (dread < block->num_records)
        {
            buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
            buf = buf->next;

            toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
            if (toread > block->num_records - dread)
                toread = block->num_records - dread;

            if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, block->bread, toread *
                is_keysize(tab->is), buf->data) < 0)
            {
                logf (LOG_FATAL, "bfread failed.");
                return -1;
            }
            buf->num = toread;
            dread += toread;
            block->bread += toread * is_keysize(tab->is);
        }
        block->state = IS_MBSTATE_CLEAN;
    }
    logf (LOG_DEBUG, "R: Block #%d contains %d records.", block->diskpos, block->num_records);
    return 0;
}

/*
 * write dirty blocks to bfile.
 * Allocate blocks as necessary.
 */
void is_p_sync(is_mtable *tab)
{
    is_mblock *p;
    is_mbuf *b;
    int sum, v;
    isam_blocktype *type;

    type = &tab->is->types[tab->pos_type];
    for (p = tab->data; p; p = p->next)
    {
        if (p->state < IS_MBSTATE_DIRTY)
            continue;
        /* make sure that blocks are allocated. */
        if (p->diskpos < 0)
            p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
        if (p->next)
        {
            if (p->next->diskpos < 0)
                p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
                    tab->pos_type);
            else
                p->nextpos = p->next->diskpos;
        }
        else
            p->nextpos = 0;
        sum = 0;
        memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
        sum += sizeof(p->num_records);
        memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
        sum += sizeof(p->nextpos);
        if (p == tab->data) /* first block */
        {
            memcpy(type->dbuf + sum, &tab->num_records,
                sizeof(tab->num_records));
            sum += sizeof(tab->num_records);
        }
        logf (LOG_DEBUG, "W: Block #%d contains %d records.", p->diskpos,
            p->num_records);
        assert(p->num_records > 0);
        for (b = p->data; b; b = b->next)
        {
            logf(LOG_DEBUG, " buf: offset %d, keys %d, type %d, ref %d",
                b->offset, b->num, b->type, b->refcount);
            if ((v = b->num * is_keysize(tab->is)) > 0)
                memcpy(type->dbuf + sum, b->data + b->offset, v);

            sum += v;
            assert(sum <= type->blocksize);
        }
        if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
        {
            logf (LOG_FATAL, "Failed to write block.");
            exit(1);
        }
    }
}

/*
 * Free all disk blocks associated with table.
 */
void is_p_unmap(is_mtable *tab)
{
    is_mblock *p;

    for (p = tab->data; p; p = p->next)
    {
        if (p->diskpos >= 0)
        {
            is_freestore_free(tab->is, tab->pos_type, p->diskpos);
            p->diskpos = -1;
        }
    }
}
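
/*
 * mbuf_takehead() detaches the first *num keys from the chain in *mb and
 * returns them as a separate mbuf chain. If the boundary falls inside a
 * buffer, that buffer is split: a small mbuf is created that shares the
 * underlying data (its refcount is bumped) and covers the keys left
 * behind. On return, *num holds the number of keys actually taken, which
 * may be less than requested if the chain runs out.
 */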
static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
{
    is_mbuf *p = 0, **pp = &p, *inew;
    int toget = *num;

    if (!toget)
        return 0;
    while (*mb && toget >= (*mb)->num)
    {
        toget -= (*mb)->num;
        *pp = *mb;
        *mb = (*mb)->next;
        (*pp)->next = 0;
        pp = &(*pp)->next;
    }
    if (toget > 0 && *mb)
    {
        inew = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
        inew->next = (*mb)->next;
        (*mb)->next = inew;
        inew->data = (*mb)->data;
        (*mb)->refcount++;
        inew->offset = (*mb)->offset + toget * keysize;
        inew->num = (*mb)->num - toget;
        (*mb)->num = toget;
        *pp = *mb;
        *mb = (*mb)->next;
        (*pp)->next = 0;
        toget = 0;
    }
    *num -= toget;
    return p;
}

/*
 * Split up individual blocks which have grown too large.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
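/*
 * An empty dirty block is unlinked from the chain and its disk block is
 * returned to the free list; an oversized dirty block has its keys carved
 * into chunks of roughly nice_keys_block keys with mbuf_takehead(), each
 * chunk going into a freshly allocated in-memory block.
 */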
void is_p_align(is_mtable *tab)
{
    is_mblock *mblock, *inew, *last = 0, *next;
    is_mbuf *mbufs, *mbp;
    int blocks, recsblock;

    logf (LOG_DEBUG, "Realigning table.");
    for (mblock = tab->data; mblock; mblock = next)
    {
        next = mblock->next;
        if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records == 0)
        {
            if (last)
            {
                last->next = mblock->next;
                last->state = IS_MBSTATE_DIRTY;
            }
            else
            {
                next = tab->data->next;
                if (next)
                {
                    if (next->state < IS_MBSTATE_CLEAN)
                    {
                        if (is_p_read_full(tab, next) < 0)
                        {
                            logf(LOG_FATAL, "Error during re-alignment");
                            exit(1);
                        }
                        if (next->nextpos && !next->next)
                        {
                            next->next = xmalloc_mblock();
                            next->next->diskpos = next->nextpos;
                            next->next->state = IS_MBSTATE_UNREAD;
                            next->next->data = 0;
                        }
                    }
                    next->state = IS_MBSTATE_DIRTY; /* force re-process */
                    tab->data = next;
                }
            }
            if (mblock->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, mblock->diskpos);
            xrelease_mblock(mblock);
        }
        else if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
            (mblock == tab->data ?
            tab->is->types[tab->pos_type].max_keys_block0 :
            tab->is->types[tab->pos_type].max_keys_block))
        {
            blocks = tab->num_records /
                tab->is->types[tab->pos_type].nice_keys_block;
            if (tab->num_records %
                tab->is->types[tab->pos_type].nice_keys_block)
                blocks++;
            recsblock = tab->num_records / blocks;
            if (recsblock < 1)
                recsblock = 1;
            mbufs = mblock->data;
            while ((mbp = mbuf_takehead(&mbufs, &recsblock,
                is_keysize(tab->is))) && recsblock)
            {
                if (mbufs)
                {
                    inew = xmalloc_mblock();
                    inew->diskpos = -1;
                    inew->state = IS_MBSTATE_DIRTY;
                    inew->next = mblock->next;
                    mblock->next = inew;
                }
                mblock->data = mbp;
                mblock->num_records = recsblock;
                last = mblock;
                mblock = mblock->next;
            }
            next = mblock;
        }
        else
            last = mblock;
    }
}

/*
 * Reorganize data in blocks for minimum block usage and quick access.
 * Free surplus blocks.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
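/*
 * All keys are first collected into a single mbuf chain and then dealt
 * back out into blocks of roughly nice_keys_block keys each; any blocks
 * left over at the end of the old chain have their disk blocks freed.
 */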
void is_p_remap(is_mtable *tab)
{
    is_mbuf *mbufs, **bufpp, *mbp;
    is_mblock *blockp, **blockpp;
    int recsblock, blocks;

    logf (LOG_DEBUG, "Remapping table.");
    /* collect all data */
    bufpp = &mbufs;
    for (blockp = tab->data; blockp; blockp = blockp->next)
    {
        if (blockp->state < IS_MBSTATE_CLEAN && is_m_read_full(tab, blockp) < 0)
        {
            logf (LOG_FATAL, "Read-full failed in remap.");
            exit(1);
        }
        *bufpp = blockp->data;
        while (*bufpp)
            bufpp = &(*bufpp)->next;
        blockp->data = 0;
    }
    blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
    if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
        blocks++;
    if (!blocks)
        blocks = 1;
    recsblock = tab->num_records / blocks + 1;
    if (recsblock > tab->is->types[tab->pos_type].nice_keys_block)
        recsblock--;
    blockpp = &tab->data;
    while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))) &&
        recsblock)
    {
        if (!*blockpp)
        {
            *blockpp = xmalloc_mblock();
            (*blockpp)->diskpos = -1;
        }
        (*blockpp)->data = mbp;
        (*blockpp)->num_records = recsblock;
        (*blockpp)->state = IS_MBSTATE_DIRTY;
        blockpp = &(*blockpp)->next;
    }
    if (*blockpp)
    {
        for (blockp = *blockpp; blockp; blockp = blockp->next)
            if (blockp->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, blockp->diskpos);
        xfree_mblocks(*blockpp);