/*
 * Copyright (C) 1994, Index Data I/S
 * Sebastian Hammer, Adam Dickmeiss
 *
 * Revision 1.11 1996-03-20 13:29:17 quinn
 *
 * Revision 1.10 1996/03/19 19:22:44 quinn
 *
 * Revision 1.9 1996/02/06 10:19:57 quinn
 * Attempt at fixing bug. Not all blocks were read before they were unlinked
 * prior to a remap operation.
 *
 * Revision 1.8 1996/01/29 09:47:11 quinn
 * Fixed mean little bug in the read-table code.
 *
 * Revision 1.7 1995/12/06 14:48:27 quinn
 * Fixed some strange bugs.
 *
 * Revision 1.6 1995/09/04 12:33:47 adam
 * Various cleanup. YAZ util used instead.
 *
 * Revision 1.5 1994/09/28 11:29:33 quinn
 * Added cmp parameter.
 *
 * Revision 1.4 1994/09/27 20:03:53 quinn
 * Seems relatively bug-free.
 *
 * Revision 1.3 1994/09/26 17:11:31 quinn
 *
 * Revision 1.2 1994/09/26 17:06:36 quinn
 *
 * Revision 1.1 1994/09/26 16:07:57 quinn
 * Most of the functionality in place.
 */
/*
 * This module handles the representation of tables in the bfiles.
 */

#include <assert.h>
#include <string.h>
#include <stdlib.h>

#include <log.h>    /* YAZ logging (logf, LOG_*) - assumed include path */
#include <isam.h>   /* ISAM/bfile declarations - assumed include path */
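/*
 * On-disk layout of a table block, as assumed by the read/write code below:
 * a small header (the block's own record count, the position of the next
 * block in the chain, and - in the first block only - the total record
 * count of the table), followed by the packed keys.
 */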
static int is_freestore_alloc(ISAM is, int type)
{
    int tmp;

    if (is->types[type].freelist >= 0)
    {
        tmp = is->types[type].freelist;
        if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
            &is->types[type].freelist) <= 0)
        {
            logf (LOG_FATAL, "Failed to allocate block");
            exit(1);
        }
    }
    else
        tmp = is->types[type].top++;

    logf (LOG_DEBUG, "Allocating block #%d", tmp);
    return tmp;
}
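/*
 * Return a block to the freestore: the freed block becomes the new head of
 * the freelist, and the previous head is written into its first bytes.
 */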
static void is_freestore_free(ISAM is, int type, int block)
{
    int tmp;

    logf (LOG_DEBUG, "Releasing block #%d", block);
    tmp = is->types[type].freelist;
    is->types[type].freelist = block;
    if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
    {
        logf (LOG_FATAL, "Failed to deallocate block.");
        exit(1);
    }
}
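/*
 * Read the header and as many keys of a block as fit in one large mbuf.
 * Leaves the block in state PARTIAL (more keys remain on disk) or CLEAN.
 */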
/* this code must be modified to handle an index */
int is_p_read_partial(is_mtable *tab, is_mblock *block)
{
    int toread;
    is_mbuf *buf;

    assert(block->state == IS_MBSTATE_UNREAD);
    block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
    toread = tab->is->types[tab->pos_type].blocksize;
    if (toread > is_mbuf_size[buf->type])
    {
        toread = is_mbuf_size[buf->type];
        block->state = IS_MBSTATE_PARTIAL;
    }
    else
        block->state = IS_MBSTATE_CLEAN;
    if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
        buf->data) < 0)
    {
        logf (LOG_FATAL, "bfread failed.");
        return -1;
    }
    /* extract header info */
    buf->offset = 0;
    memcpy(&block->num_records, buf->data, sizeof(block->num_records));
    buf->offset += sizeof(block->num_records);
    memcpy(&block->nextpos, buf->data + buf->offset,
        sizeof(block->nextpos));
    buf->offset += sizeof(block->nextpos);
    if (block == tab->data) /* first block */
    {
        memcpy(&tab->num_records, buf->data + buf->offset,
            sizeof(tab->num_records));
        buf->offset += sizeof(tab->num_records);
    }
    logf(LOG_DEBUG, "R: Block #%d: num %d nextpos %d total %d",
        block->diskpos, block->num_records, block->nextpos,
        block == tab->data ? tab->num_records : -1);
    buf->num = (toread - buf->offset) / is_keysize(tab->is);
    if (buf->num >= block->num_records)
    {
        buf->num = block->num_records;
        block->state = IS_MBSTATE_CLEAN;
    }
    else
        block->bread = buf->offset + buf->num * is_keysize(tab->is);
    return 0;
}
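/*
 * Make sure a block is entirely in memory, completing a partial read by
 * appending further mbufs until all of the block's records are loaded.
 */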
int is_p_read_full(is_mtable *tab, is_mblock *block)
{
    is_mbuf *buf;
    int dread, toread;

    if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
    {
        logf (LOG_FATAL, "partial read failed.");
        return -1;
    }
    if (block->state == IS_MBSTATE_PARTIAL)
    {
        buf = block->data;
        dread = block->data->num;
        while (dread < block->num_records)
        {
            buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
            buf = buf->next;
            toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
            if (toread > block->num_records - dread)
                toread = block->num_records - dread;
            if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos,
                block->bread, toread * is_keysize(tab->is), buf->data) < 0)
            {
                logf (LOG_FATAL, "bfread failed.");
                return -1;
            }
            buf->num = toread;
            dread += toread;
            block->bread += toread * is_keysize(tab->is);
        }
        block->state = IS_MBSTATE_CLEAN;
    }
    logf (LOG_DEBUG, "R: Block #%d contains %d records.", block->diskpos,
        block->num_records);
    return 0;
}
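/*
 * Note: comparisons like 'state < IS_MBSTATE_DIRTY' below rely on the block
 * states being numerically ordered (presumably UNREAD < PARTIAL < CLEAN <
 * DIRTY).
 */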
/*
 * write dirty blocks to bfile.
 * Allocate blocks as necessary.
 */
void is_p_sync(is_mtable *tab)
{
    is_mblock *p;
    is_mbuf *b;
    int sum, v;
    isam_blocktype *type;

    type = &tab->is->types[tab->pos_type];
    for (p = tab->data; p; p = p->next)
    {
        if (p->state < IS_MBSTATE_DIRTY)
            continue;
        /* make sure that blocks are allocated. */
        if (p->diskpos < 0)
            p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
        if (p->next)
        {
            if (p->next->diskpos < 0)
                p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
                    tab->pos_type);
            else
                p->nextpos = p->next->diskpos;
        }
        else
            p->nextpos = 0;
        sum = 0;
        memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
        sum += sizeof(p->num_records);
        memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
        sum += sizeof(p->nextpos);
        if (p == tab->data) /* first block */
        {
            memcpy(type->dbuf + sum, &tab->num_records,
                sizeof(tab->num_records));
            sum += sizeof(tab->num_records);
        }
        logf (LOG_DEBUG, "W: Block #%d contains %d records.", p->diskpos,
            p->num_records);
        for (b = p->data; b; b = b->next)
        {
            logf(LOG_DEBUG, "  buf: offset %d, keys %d, type %d, ref %d",
                b->offset, b->num, b->type, b->refcount);
            if ((v = b->num * is_keysize(tab->is)) > 0)
                memcpy(type->dbuf + sum, b->data + b->offset, v);
            sum += v;
            assert(sum <= type->blocksize);
        }
        if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
        {
            logf (LOG_FATAL, "Failed to write block.");
            exit(1);
        }
    }
}
/*
 * Free all disk blocks associated with table.
 */
void is_p_unmap(is_mtable *tab)
{
    is_mblock *p;

    for (p = tab->data; p; p = p->next)
        if (p->diskpos >= 0)
        {
            is_freestore_free(tab->is, tab->pos_type, p->diskpos);
            p->diskpos = -1;
        }
}
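/*
 * Detach the first *num keys from the mbuf chain and return them as a new
 * chain; *num is updated to the number of keys actually taken. If the cut
 * falls inside an mbuf, that mbuf is split by means of a small mbuf that
 * shares the underlying data.
 */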
static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
{
    is_mbuf *p = 0, **pp = &p, *new;
    int toget = *num;

    if (!toget)
        return 0;
    while (*mb && toget >= (*mb)->num)
    {
        toget -= (*mb)->num;
        *pp = *mb;
        *mb = (*mb)->next;
        (*pp)->next = 0;
        pp = &(*pp)->next;
    }
    if (toget > 0 && *mb)
    {
        new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
        new->next = (*mb)->next;
        (*mb)->next = 0;
        new->data = (*mb)->data;
        (*mb)->refcount++;
        new->offset = (*mb)->offset + toget * keysize;
        new->num = (*mb)->num - toget;
        (*mb)->num = toget;
        *pp = *mb;
        *mb = new;
        pp = &(*pp)->next;
        toget = 0;
    }
    *num -= toget;
    return p;
}
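/*
 * The two reorganization routines below both carve the key stream into
 * chunks of roughly nice_keys_block keys using mbuf_takehead and rely on
 * its updated *num value as their loop condition.
 */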
/*
 * Split up individual blocks which have grown too large.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
void is_p_align(is_mtable *tab)
{
    is_mblock *mblock, *new, *last = 0, *next;
    is_mbuf *mbufs, *mbp;
    int blocks, recsblock;

    logf (LOG_DEBUG, "Realigning table.");
    for (mblock = tab->data; mblock; mblock = next)
    {
        next = mblock->next;
        if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records == 0)
        {
            if (last)
            {
                last->next = mblock->next;
                last->state = IS_MBSTATE_DIRTY;
            }
            else /* the first block of the table has become empty */
            {
                next = tab->data->next;
                if (next)
                {
                    if (next->state < IS_MBSTATE_CLEAN)
                    {
                        if (is_p_read_full(tab, next) < 0)
                        {
                            logf(LOG_FATAL, "Error during re-alignment");
                            exit(1);
                        }
                        if (next->nextpos && !next->next)
                        {
                            next->next = xmalloc_mblock();
                            next->next->diskpos = next->nextpos;
                            next->next->state = IS_MBSTATE_UNREAD;
                            next->next->data = 0;
                        }
                    }
                    next->state = IS_MBSTATE_DIRTY; /* force re-process */
                }
                tab->data = next;
            }
            if (mblock->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, mblock->diskpos);
            xrelease_mblock(mblock);
        }
        else if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
            (mblock == tab->data ?
            tab->is->types[tab->pos_type].max_keys_block0 :
            tab->is->types[tab->pos_type].max_keys_block))
        {
            blocks = tab->num_records /
                tab->is->types[tab->pos_type].nice_keys_block;
            if (tab->num_records %
                tab->is->types[tab->pos_type].nice_keys_block)
                blocks++;
            recsblock = tab->num_records / blocks;
            if (recsblock < 1)
                recsblock = 1;
            mbufs = mblock->data;
            while ((mbp = mbuf_takehead(&mbufs, &recsblock,
                is_keysize(tab->is))) && recsblock)
            {
                if (mbufs)
                {
                    new = xmalloc_mblock();
                    new->diskpos = -1;
                    new->state = IS_MBSTATE_DIRTY;
                    new->next = mblock->next;
                    mblock->next = new;
                }
                mblock->data = mbp;
                mblock->num_records = recsblock;
                last = mblock;
                mblock = mblock->next;
            }
            next = mblock;
        }
        else
            last = mblock;
    }
}
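/*
 * Unlike is_p_align, is_p_remap (below) first gathers every key in the
 * table into one chain and then rebuilds the block list from scratch,
 * which gives tighter packing at the cost of reading all blocks.
 */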
/*
 * Reorganize data in blocks for minimum block usage and quick access.
 * Free surplus blocks.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
void is_p_remap(is_mtable *tab)
{
    is_mbuf *mbufs, **bufpp, *mbp;
    is_mblock *blockp, **blockpp;
    int recsblock, blocks;

    logf (LOG_DEBUG, "Remapping table.");
    /* collect all data */
    bufpp = &mbufs;
    for (blockp = tab->data; blockp; blockp = blockp->next)
    {
        if (blockp->state < IS_MBSTATE_CLEAN && is_m_read_full(tab, blockp) < 0)
        {
            logf (LOG_FATAL, "Read-full failed in remap.");
            exit(1);
        }
        *bufpp = blockp->data;
        while (*bufpp)
            bufpp = &(*bufpp)->next;
        blockp->data = 0;
    }
    blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
    if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
        blocks++;
    if (!blocks)
        blocks = 1;
    recsblock = tab->num_records / blocks + 1;
    if (recsblock > tab->is->types[tab->pos_type].nice_keys_block)
        recsblock--;
    blockpp = &tab->data;
    while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))) &&
        recsblock)
    {
        if (!*blockpp)
        {
            *blockpp = xmalloc_mblock();
            (*blockpp)->diskpos = -1;
        }
        (*blockpp)->data = mbp;
        (*blockpp)->num_records = recsblock;
        (*blockpp)->state = IS_MBSTATE_DIRTY;
        blockpp = &(*blockpp)->next;
    }
    if (mbp)
        xfree_mbufs(mbp);
    if (*blockpp) /* free surplus blocks */
    {
        for (blockp = *blockpp; blockp; blockp = blockp->next)
            if (blockp->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, blockp->diskpos);
        xfree_mblocks(*blockpp);
        *blockpp = 0;
    }
}