2 * Copyright (C) 1994, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.6 1995-09-04 12:33:47 adam
8 * Various cleanup. YAZ util used instead.
10 * Revision 1.5 1994/09/28 11:29:33 quinn
11 * Added cmp parameter.
13 * Revision 1.4 1994/09/27 20:03:53 quinn
14 * Seems relatively bug-free.
16 * Revision 1.3 1994/09/26 17:11:31 quinn
19 * Revision 1.2 1994/09/26 17:06:36 quinn
22 * Revision 1.1 1994/09/26 16:07:57 quinn
23 * Most of the functionality in place.
28 * This module handles the representation of tables in the bfiles.
36 static int is_freestore_alloc(ISAM is, int type)
/*
 * Allocate one disk block for the given block type.  Reuses the head of
 * the per-type freelist when one exists (the freed block's first bytes on
 * disk hold the next freelist entry), otherwise grows the file by taking
 * the block at 'top'.  Returns the allocated block number (via tmp).
 * NOTE(review): several lines of this function are elided in this view.
 */
40 if (is->types[type].freelist >= 0)
/* Pop the freelist head; read the next-free pointer stored inside it. */
42 tmp = is->types[type].freelist;
43 if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
44 &is->types[type].freelist) <=0)
46 logf (LOG_FATAL, "Failed to allocate block");
/* Freelist empty: extend the file by one block. */
51 tmp = is->types[type].top++;
53 logf (LOG_DEBUG, "Allocating block #%d", tmp);
57 static void is_freestore_free(ISAM is, int type, int block)
/*
 * Return 'block' to the freestore: the old freelist head is written into
 * the freed block on disk, and the freed block becomes the new head.
 */
61 logf (LOG_DEBUG, "Releasing block #%d", block);
62 tmp = is->types[type].freelist;
63 is->types[type].freelist = block;
/* Persist the previous freelist head inside the freed block. */
64 if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
66 logf (LOG_FATAL, "Failed to deallocate block.");
71 /* this code must be modified to handle an index */
72 int is_p_read_partial(is_mtable *tab, is_mblock *block)
/*
 * Read the first chunk of an UNREAD disk block into one freshly
 * allocated large mbuf: at most the type's blocksize, clamped to the
 * mbuf's capacity.  Extracts the block header (record count, next-block
 * position, and — for the table's first block only — the table-wide
 * record count), then computes how many whole keys the read delivered.
 * Leaves the block PARTIAL when the clamp cut the read short, CLEAN
 * when all of its records are now in memory.
 * NOTE(review): error-return and brace lines are elided in this view.
 */
77 assert(block->state == IS_MBSTATE_UNREAD);
78 block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
79 toread = tab->is->types[tab->pos_type].blocksize;
/* Clamp to mbuf capacity; a clamped read only yields part of the block. */
80 if (toread > is_mbuf_size[buf->type])
82 toread = is_mbuf_size[buf->type];
83 block->state = IS_MBSTATE_PARTIAL;
86 block->state = IS_MBSTATE_CLEAN;
87 if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
90 logf (LOG_FATAL, "bfread failed.");
93 /* extract header info */
95 memcpy(&block->num_records, buf->data, sizeof(block->num_records));
96 buf->offset += sizeof(block->num_records);
97 memcpy(&block->nextpos, buf->data + buf->offset,
98 sizeof(block->nextpos));
99 buf->offset += sizeof(block->nextpos);
100 if (block == tab->data) /* first block */
/* The first block additionally carries the table's total record count. */
102 memcpy(&tab->num_records, buf->data + buf->offset,
103 sizeof(tab->num_records));
104 buf->offset +=sizeof(tab->num_records);
/* Number of whole keys actually delivered by this read. */
106 buf->num = (toread - buf->offset) / is_keysize(tab->is);
107 if (buf->num >= block->num_records)
109 buf->num = block->num_records;
110 block->state = IS_MBSTATE_CLEAN;
/* bread = count of data bytes of this block now resident in memory. */
113 block->bread = buf->num * is_keysize(tab->is);
117 int is_p_read_full(is_mtable *tab, is_mblock *block)
/*
 * Ensure an entire disk block is resident: perform the partial read
 * first when the block is still UNREAD, then, while the block remains
 * PARTIAL, keep chaining new large mbufs and reading key-sized chunks
 * (resuming at offset block->bread) until all num_records records are
 * loaded.
 * NOTE(review): loop-counter updates and braces are elided in this view.
 */
122 if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
124 logf (LOG_FATAL, "partial read failed.");
127 if (block->state == IS_MBSTATE_PARTIAL)
/* dread = records loaded so far; the partial read supplied the first batch. */
130 dread = block->data->num;
131 while (dread < block->num_records)
133 buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
/* Fill each new mbuf with as many whole records as it can hold. */
136 toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
137 if (toread > block->num_records - dread)
138 toread = block->num_records - dread;
140 if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, block->bread, toread *
141 is_keysize(tab->is), buf->data) < 0)
143 logf (LOG_FATAL, "bfread failed.");
149 block->bread += toread * is_keysize(tab->is);
152 logf (LOG_DEBUG, "R: Block #%d contains %d records.", block->diskpos, block->num_records);
157 * write dirty blocks to bfile.
158 * Allocate blocks as necessary.
160 void is_p_sync(is_mtable *tab)
/*
 * Flush every dirty in-memory block of the table to the bfile.  For each
 * dirty block: make sure it (and its successor, so nextpos can be
 * recorded) has a disk position, serialize the header plus all key data
 * into the per-type staging buffer type->dbuf, and write it out in one
 * bf_write call.
 * NOTE(review): diskpos checks and sum updates are elided in this view.
 */
165 isam_blocktype *type;
167 type = &tab->is->types[tab->pos_type];
168 for (p = tab->data; p; p = p->next)
/* Blocks whose state is below DIRTY are already on disk — skip them. */
170 if (p->state < IS_MBSTATE_DIRTY)
172 /* make sure that blocks are allocated. */
174 p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
/* Allocate the successor's block now so this header can point at it. */
177 if (p->next->diskpos < 0)
178 p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
181 p->nextpos = p->next->diskpos;
/* Serialize the block header into the staging buffer. */
186 memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
187 sum += sizeof(p->num_records);
188 memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
189 sum += sizeof(p->nextpos);
190 if (p == tab->data) /* first block */
/* The first block also stores the table-wide record count. */
192 memcpy(type->dbuf + sum, &tab->num_records,
193 sizeof(tab->num_records));
194 sum += sizeof(tab->num_records);
/* Append the key data from every mbuf in this block's chain. */
196 for (b = p->data; b; b = b->next)
198 memcpy(type->dbuf + sum, b->data + b->offset, v = b->num *
199 is_keysize(tab->is));
201 assert(sum <= type->blocksize);
203 if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
205 logf (LOG_FATAL, "Failed to write block.");
208 logf (LOG_DEBUG, "W: Block #%d contains %d records.", p->diskpos, p->num_records);
213 * Free all disk blocks associated with table.
215 void is_p_unmap(is_mtable *tab)
/*
 * Walk the table's in-memory block chain and return each block's disk
 * position to the freestore.  In-memory structures are not released here.
 * NOTE(review): the diskpos >= 0 guard line appears elided in this view.
 */
219 for (p = tab->data; p; p = p->next)
222 is_freestore_free(tab->is, tab->pos_type, p->diskpos);
227 static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
/*
 * Detach up to *num records from the head of the mbuf chain *mb and
 * return them as a new chain; *mb is advanced past what was taken.
 * Whole mbufs are moved while they fit entirely; when the boundary falls
 * inside an mbuf, a small mbuf aliasing the same data buffer (with its
 * offset advanced by toget*keysize) is created to split it without
 * copying key data.
 * NOTE(review): the *num write-back and unlink lines are elided here.
 */
229 is_mbuf *p = 0, **pp = &p, *new;
/* Move whole mbufs while the remaining request covers them completely. */
234 while (*mb && toget >= (*mb)->num)
/* Partial take: split the current mbuf by aliasing its data buffer. */
242 if (toget > 0 && *mb)
244 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
245 new->next = (*mb)->next;
247 new->data = (*mb)->data;
249 new->offset = (*mb)->offset + toget * keysize;
250 new->num = (*mb)->num - toget;
262 * Split up individual blocks which have grown too large.
263 * is_p_align and is_p_remap are alternative functions which trade off
264 * speed in updating versus optimum usage of disk blocks.
266 void is_p_align(tab)
/*
 * NOTE(review): the line above is byte-preserved from the elided source;
 * the visible definition is: void is_p_align(is_mtable *tab).
 * Walks the block chain and (1) unlinks and frees dirty blocks that have
 * become empty, (2) splits dirty blocks holding more keys than the type
 * permits (the first block has its own, smaller limit max_keys_block0)
 * into a run of new blocks of roughly nice_keys_block records each,
 * carved off the overfull block's mbuf chain via mbuf_takehead.
 */
268 is_mblock *mblock, *new, *last = 0, *next;
269 is_mbuf *mbufs, *mbp;
270 int blocks, recsblock;
272 logf (LOG_DEBUG, "Realigning table.");
273 for (mblock = tab->data; mblock; mblock = next)
/* Case 1: dirty block with no records — unlink it and free its storage. */
276 if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records == 0)
280 last->next = mblock->next;
281 last->state = IS_MBSTATE_DIRTY;
/* Empty block was the chain head: advance tab->data instead. */
286 tab->data = tab->data->next;
287 tab->data->state = IS_MBSTATE_DIRTY;
290 if (mblock->diskpos >= 0)
291 is_freestore_free(tab->is, tab->pos_type, mblock->diskpos);
292 xrelease_mblock(mblock);
/* Case 2: dirty block over its per-type key limit — split it up. */
294 else if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
295 (mblock == tab->data ?
296 tab->is->types[tab->pos_type].max_keys_block0 :
297 tab->is->types[tab->pos_type].max_keys_block))
/* Target nice_keys_block records per block; round block count up on
 * a nonzero remainder. */
299 blocks = tab->num_records /
300 tab->is->types[tab->pos_type].nice_keys_block;
301 if (tab->num_records %
302 tab->is->types[tab->pos_type].nice_keys_block)
304 recsblock = tab->num_records / blocks;
307 mbufs = mblock->data;
/* Peel recsblock records at a time into freshly linked dirty blocks. */
308 while ((mbp = mbuf_takehead(&mbufs, &recsblock,
309 is_keysize(tab->is))) && recsblock)
313 new = xmalloc_mblock();
315 new->state = IS_MBSTATE_DIRTY;
316 new->next = mblock->next;
320 mblock->num_records = recsblock;
322 mblock = mblock->next;
332 * Reorganize data in blocks for minimum block usage and quick access.
333 * Free surplus blocks.
334 * is_p_align and is_p_remap are alternative functions which trade off
335 * speed in updating versus optimum usage of disk blocks.
337 void is_p_remap(is_mtable *tab)
/*
 * Full repack of the table: read every block completely, string all mbuf
 * chains together into one list, then redistribute the records into a
 * fresh block chain of roughly nice_keys_block records per block.  Any
 * leftover blocks past the rebuilt chain have their disk storage returned
 * to the freestore and their in-memory structures freed.
 * NOTE(review): braces and some reuse-existing-block lines are elided.
 */
339 is_mbuf *mbufs, **bufpp, *mbp;
340 is_mblock *blockp, **blockpp;
341 int recsblock, blocks;
343 logf (LOG_DEBUG, "Remapping table.");
344 /* collect all data */
346 for (blockp = tab->data; blockp; blockp = blockp->next)
/* Each block must be fully in memory before its mbufs are collected. */
348 if (blockp->state < IS_MBSTATE_CLEAN && is_m_read_full(tab, blockp) < 0)
350 logf (LOG_FATAL, "Read-full failed in remap.");
353 *bufpp = blockp->data;
355 bufpp = &(*bufpp)->next;
/* Choose a block count targeting nice_keys_block records per block,
 * rounding up on a nonzero remainder. */
358 blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
359 if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
363 recsblock = tab->num_records / blocks + 1;
364 if (recsblock > tab->is->types[tab->pos_type].nice_keys_block)
/* Rebuild the block chain, taking recsblock records per iteration. */
366 blockpp = &tab->data;
367 while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))) &&
/* No existing block to reuse at this position: allocate a new one
 * with no disk position yet (diskpos -1) and mark it dirty. */
372 *blockpp = xmalloc_mblock();
373 (*blockpp)->diskpos = -1;
375 (*blockpp)->data = mbp;
376 (*blockpp)->num_records = recsblock;
377 (*blockpp)->state = IS_MBSTATE_DIRTY;
378 blockpp = &(*blockpp)->next;
/* Surplus trailing blocks: release their disk space, then free them. */
384 for (blockp = *blockpp; blockp; blockp = blockp->next)
385 if (blockp->diskpos >= 0)
386 is_freestore_free(tab->is, tab->pos_type, blockp->diskpos);
387 xfree_mblocks(*blockpp);