2 * Copyright (C) 1994-1999, Index Data
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.17 1999-05-26 07:49:14 adam
10 * Revision 1.16 1999/02/02 14:51:20 adam
11 * Updated WIN32 code specific sections. Changed header.
13 * Revision 1.15 1997/09/09 13:38:11 adam
14 * Partial port to WIN95/NT.
16 * Revision 1.14 1996/10/29 13:56:56 adam
17 * Include of zebrautl.h instead of alexutil.h.
19 * Revision 1.13 1996/03/20 13:29:16 quinn
22 * Revision 1.12 1996/03/11 14:52:23 quinn
23 * Fixed update bug. Repeated insertion in the same area sometimes caused
26 * Revision 1.11 1996/02/10 12:20:58 quinn
27 * *** empty log message ***
29 * Revision 1.10 1995/12/12 14:12:47 quinn
30 * *** empty log message ***
32 * Revision 1.9 1995/12/06 15:48:46 quinn
33 * Fixed update-problem.
35 * Revision 1.8 1995/12/06 14:48:27 quinn
36 * Fixed some strange bugs.
38 * Revision 1.7 1995/12/06 09:59:46 quinn
39 * Fixed memory-consumption bug in memory.c
40 * Added more blocksizes to the default ISAM configuration.
42 * Revision 1.6 1995/09/04 12:33:47 adam
43 * Various cleanup. YAZ util used instead.
45 * Revision 1.5 1994/09/28 16:58:33 quinn
48 * Revision 1.4 1994/09/27 20:03:52 quinn
49 * Seems relatively bug-free.
51 * Revision 1.3 1994/09/26 17:11:30 quinn
54 * Revision 1.2 1994/09/26 17:06:35 quinn
57 * Revision 1.1 1994/09/26 16:07:56 quinn
58 * Most of the functionality in place.
63 * This module accesses and rearranges the records of the tables.
/* Data-area sizes (bytes) for the three mbuf types.
 * Index 0 is zero — presumably a header-only/reference mbuf that points
 * into another mbuf's data rather than owning a buffer (see the
 * refcount handling in xmalloc_mbuf) — TODO confirm. */
74 int is_mbuf_size[3] = { 0, 1024, 4096 };
/* Free lists used to recycle mblock/mbuf structures instead of calling
 * the allocator each time. mblock_tmplist appears to collect blocks
 * released via xrelease_mblock until is_m_release_tab flushes it. */
76 static is_mblock *mblock_tmplist = 0, *mblock_freelist = 0;
77 static is_mbuf *mbuf_freelist[3] = {0, 0, 0};
/* Number of is_mblock structures grabbed from xmalloc per refill of
 * the mblock free list. */
79 #define MALLOC_CHUNK 20
/* Pop one is_mblock from the free list, refilling the list with a
 * chunk of MALLOC_CHUNK structures when it runs dry. The popped block
 * is handed out in the IS_MBSTATE_UNREAD state.
 * NOTE(review): several lines of this function (declarations, the
 * empty-list check, the return) are not visible in this view. */
81 is_mblock *xmalloc_mblock()
88         mblock_freelist = (is_mblock *)
89             xmalloc(sizeof(is_mblock) * MALLOC_CHUNK);
/* Thread the freshly allocated chunk into a singly linked free list;
 * the last element terminates the chain. */
90         for (i = 0; i < MALLOC_CHUNK - 1; i++)
91             mblock_freelist[i].next = &mblock_freelist[i+1];
92         mblock_freelist[i].next = 0;
94     tmp = mblock_freelist;
95     mblock_freelist = mblock_freelist->next;
97     tmp->state = IS_MBSTATE_UNREAD;
/* Allocate (or recycle from the per-type free list) one is_mbuf of the
 * given type. The data area is carved out of the same allocation,
 * immediately after the header. Offsets and counters start at zero.
 * NOTE(review): some lines (braces, the type-field assignment, return)
 * are not visible in this view. */
102 is_mbuf *xmalloc_mbuf(int type)
106     if (mbuf_freelist[type])
108         tmp = mbuf_freelist[type];
109         mbuf_freelist[type] = tmp->next;
/* Header and payload allocated in one shot; payload size comes from
 * is_mbuf_size[type]. */
113         tmp = (is_mbuf*) xmalloc(sizeof(is_mbuf) + is_mbuf_size[type]);
/* Type 0 (the zero-sized "reference" mbuf) starts with refcount 0 —
 * presumably because it aliases another mbuf's data; TODO confirm. */
116     tmp->refcount = type ? 1 : 0;
117     tmp->offset = tmp->num = tmp->cur_record = 0;
/* Point data just past the header within the combined allocation. */
118     tmp->data = (char*) tmp + sizeof(is_mbuf);
/* Return a single mbuf to the free list for its type (it is recycled,
 * never handed back to the allocator). */
123 void xfree_mbuf(is_mbuf *p)
125     p->next = mbuf_freelist[p->type];
126     mbuf_freelist[p->type] = p;
/* Release an entire chain of mbufs (body not visible in this view;
 * presumably walks l->next calling xfree_mbuf on each — TODO confirm). */
129 void xfree_mbufs(is_mbuf *l)
/* Free one mblock: release its mbuf chain, then push the mblock itself
 * onto the mblock free list for reuse. */
141 void xfree_mblock(is_mblock *p)
143     xfree_mbufs(p->data);
144     p->next = mblock_freelist;
/* Park an mblock on the temporary list instead of freeing it outright;
 * the tmplist is flushed later (see is_m_release_tab). */
148 void xrelease_mblock(is_mblock *p)
150     p->next = mblock_tmplist;
/* Release a chain of mblocks (body not visible in this view; presumably
 * walks l->next calling xfree_mblock on each — TODO confirm). */
154 void xfree_mblocks(is_mblock *l)
/* Initialize the in-memory table handle for ISAM position 'pos'.
 * Two paths (the branch itself is not visible in this view, but the two
 * setups below suggest it): an existing table (pos points at a disk
 * block) is set up lazily as UNREAD with unknown record count (-1);
 * a new table gets a CLEAN, empty mblock with one large mbuf. */
166 void is_m_establish_tab(ISAM is, is_mtable *tab, ISAM_P pos)
168     tab->data = xmalloc_mblock();
/* --- existing table: defer the actual disk read --- */
171         tab->pos_type = is_type(pos);
/* -1 marks "record count not yet known" until the block is read. */
172         tab->num_records = -1;
173         tab->data->num_records = -1;
174         tab->data->diskpos = is_block(pos);
175         tab->data->state = IS_MBSTATE_UNREAD;
177         tab->cur_mblock = tab->data;
178         tab->cur_mblock->cur_mbuf = 0;
/* --- new table: empty and clean, nothing on disk yet (diskpos -1) --- */
184         tab->num_records = 0;
185         tab->data->num_records = 0;
186         tab->data->diskpos = -1;
187         tab->data->state = IS_MBSTATE_CLEAN;
188         tab->data->data = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
189         tab->cur_mblock = tab->data;
190         tab->cur_mblock->cur_mbuf = tab->data->data;
191         tab->cur_mblock->cur_mbuf->cur_record = 0;
/* Release all memory held by the table: its mblock chain plus anything
 * parked on the global temporary list by xrelease_mblock. */
197 void is_m_release_tab(is_mtable *tab)
199     xfree_mblocks(tab->data);
200     xfree_mblocks(mblock_tmplist);
/* Reset the read cursor to the first record of the first mblock.
 * The inner resets are presumably guarded by checks that the data is
 * actually in memory (guard lines not visible in this view). */
204 void is_m_rewind(is_mtable *tab)
206     tab->cur_mblock = tab->data;
209         tab->data->cur_mbuf = tab->data->data;
211             tab->data->data->cur_record = 0;
/* Pull the given mblock fully into memory via is_p_read_full and
 * position its cursor at the first record. If the block on disk chains
 * to a further block (nextpos) that we have not seen yet, allocate a
 * placeholder mblock for it in the UNREAD state so traversal can
 * continue lazily. Returns negative on read failure (error path lines
 * not visible in this view). */
215 static int read_current_full(is_mtable *tab, is_mblock *mblock)
217     if (is_p_read_full(tab, mblock) < 0)
219     if (mblock->nextpos && !mblock->next)
221         mblock->next = xmalloc_mblock();
222         mblock->next->diskpos = mblock->nextpos;
223         mblock->next->state = IS_MBSTATE_UNREAD;
224         mblock->next->data = 0;
/* Cursor to the start of the freshly read block. */
226     mblock->cur_mbuf = mblock->data;
227     mblock->data->cur_record = 0;
/* Public wrapper around read_current_full. */
231 int is_m_read_full(is_mtable *tab, is_mblock *mblock)
233     return read_current_full(tab, mblock);
237  * replace the record right behind the pointer.
/* Overwrite the record just read (cur_record - 1) with 'rec' and mark
 * the current mblock dirty so it will be flushed. Caller must have
 * positioned the cursor with a prior read. */
239 void is_m_replace_record(is_mtable *tab, const void *rec)
241     is_mbuf *mbuf = tab->cur_mblock->cur_mbuf;
243     /* we assume that block is already in memory and that we are in the
244      * right mbuf, and that it has space for us. */
245     memcpy(mbuf->data + mbuf->offset + (mbuf->cur_record - 1) *
246         is_keysize(tab->is), rec, is_keysize(tab->is));
247     tab->cur_mblock->state = IS_MBSTATE_DIRTY;
251  * Delete the record right behind the pointer.
/* Remove the record just read from the current mbuf. Three cases:
 * at the top of the mbuf (shrink num — exact lines not visible here),
 * at the beginning (advance offset past it), or in the middle (split
 * the mbuf by inserting a zero-size "reference" mbuf that aliases the
 * tail of the same data area). Decrements the block's record count and
 * marks both the current block and head block dirty. */
253 void is_m_delete_record(is_mtable *tab)
255     is_mbuf *mbuf, *inew;
257     mbuf = tab->cur_mblock->cur_mbuf;
258     if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
263     else if (mbuf->cur_record == 1) /* beginning of mbuf */
/* Skip over the first record by moving the window start forward. */
266         mbuf->offset +=is_keysize(tab->is);
267         mbuf->cur_record = 0;
269     else /* middle of mbuf */
271         /* insert block after current one */
272         inew = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
273         inew->next = mbuf->next;
276         /* virtually transfer everything after current record to new one. */
277         inew->data = mbuf->data;
279         inew->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
280         inew->num = mbuf->num - mbuf->cur_record;
282         /* old buf now only contains stuff before current record */
283         mbuf->num = mbuf->cur_record -1;
284         tab->cur_mblock->cur_mbuf = inew;
287     tab->cur_mblock->num_records--;
288     tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/* Insert 'rec' at the current cursor position. Ensures the current
 * block is fully in memory first. If the cursor is at the end of the
 * current mbuf and that mbuf is shared (refcount != 1) or full, a new
 * large mbuf is chained in. Otherwise (insert in the middle) the mbuf
 * is split: a medium mbuf for the new record plus a small reference
 * mbuf aliasing the displaced tail. Bumps the record count and marks
 * the current and head blocks dirty.
 * NOTE(review): some lines (braces, error returns, the final count
 * increments around the memcpy) are not visible in this view. */
291 int is_m_write_record(is_mtable *tab, const void *rec)
293     is_mbuf *mbuf, *oldnext, *dmbuf;
295     /* make sure block is all in memory */
296     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
297         if (read_current_full(tab, tab->cur_mblock) < 0)
299     mbuf = tab->cur_mblock->cur_mbuf;
300     if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
302         /* mbuf is reference or full */
303         if (mbuf->refcount != 1 || mbuf->offset + (mbuf->num + 1) *
304             is_keysize(tab->is) > is_mbuf_size[mbuf->type])
/* Chain in a fresh large mbuf after the current one. */
306             oldnext = mbuf->next;
307             mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
308             mbuf->next->next = oldnext;
310             tab->cur_mblock->cur_mbuf = mbuf;
311             mbuf->cur_record = 0;
/* Middle-of-mbuf insert: medium mbuf for the new record, then a small
 * reference mbuf (dmbuf) aliasing the tail of the original data. */
316         oldnext = mbuf->next;
317         mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_MEDIUM);
318         mbuf->next->next = dmbuf = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
319         dmbuf->data = mbuf->data;
320         dmbuf->next = oldnext;
321         dmbuf->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
322         dmbuf->num = mbuf->num - mbuf->cur_record;
323         mbuf->num -= dmbuf->num;
325         mbuf = tab->cur_mblock->cur_mbuf = mbuf->next;
326         mbuf->cur_record = 0;
329     logf (LOG_DEBUG, "is_m_write_rec(rec == %d)", mbuf->cur_record);
331     memcpy(mbuf->data + mbuf->offset + mbuf->cur_record * is_keysize(tab->is),
332         rec, is_keysize(tab->is));
336     tab->cur_mblock->num_records++;
337     tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/* Step the cursor back one record (undo the last read). When the last
 * read crossed an mbuf boundary, falls back to the remembered previous
 * mbuf (tab->last_mbuf); otherwise just decrements cur_record. The
 * branch between the two cases is not visible in this view. */
341 void is_m_unread_record(is_mtable *tab)
343     assert(tab->cur_mblock->cur_mbuf->cur_record);
345         tab->cur_mblock->cur_mbuf = tab->last_mbuf;
347         tab->cur_mblock->cur_mbuf->cur_record--;
351  * non-destructive read.
/* Copy the record under the cursor into 'rec' WITHOUT advancing the
 * table's cursor (local mblock/mbuf walk only). Faults blocks in as
 * needed. Returns 0 at end of table; success return value is on a line
 * not visible in this view (presumably 1 — TODO confirm). */
353 int is_m_peek_record(is_mtable *tab, void *rec)
358     /* make sure block is all in memory */
359     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
360         if (read_current_full(tab, tab->cur_mblock) < 0)
362     mblock = tab->cur_mblock;
363     mbuf = mblock->cur_mbuf;
364     if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
366         if (!mbuf->next) /* end of mblock */
/* Peek into the next mblock, faulting it in if necessary. */
370             mblock = mblock->next;
371             if (mblock->state <= IS_MBSTATE_PARTIAL)
372                 if (read_current_full(tab, mblock) < 0)
377             return 0; /* EOTable */
381         mbuf->cur_record = 0;
383     memcpy(rec, mbuf->data + mbuf->offset + mbuf->cur_record *
384         is_keysize(tab->is), is_keysize(tab->is));
/* Copy the next record into 'buf' and advance the cursor. Faults the
 * current block fully into memory first. When the cursor crosses into
 * the next mblock and 'keep' is false, a clean block that exists on
 * disk has its mbufs dropped and reverts to UNREAD to bound memory use.
 * Remembers the previous mbuf in tab->last_mbuf so is_m_unread_record
 * can step back across the boundary. Returns 0 at end of table; the
 * success return value is on a line not visible in this view
 * (presumably 1 — TODO confirm). */
388 int is_m_read_record(is_mtable *tab, void *buf, int keep)
392     /* make sure block is all in memory */
393     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
394         if (read_current_full(tab, tab->cur_mblock) < 0)
396     mbuf = tab->cur_mblock->cur_mbuf;
397     if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
399         if (!mbuf->next) /* end of mblock */
/* Clean block already on disk and caller doesn't need it kept:
 * free its mbufs and mark it UNREAD so it can be re-faulted later. */
401             if (!keep && tab->cur_mblock->state == IS_MBSTATE_CLEAN &&
402                 tab->cur_mblock->diskpos > 0)
404                 xfree_mbufs(tab->cur_mblock->data);
405                 tab->cur_mblock->data = 0;
406                 tab->cur_mblock->state = IS_MBSTATE_UNREAD;
408             if (tab->cur_mblock->next)
410                 tab->cur_mblock = tab->cur_mblock->next;
411                 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
412                     if (read_current_full(tab, tab->cur_mblock) < 0)
414                 tab->cur_mblock->cur_mbuf = mbuf = tab->cur_mblock->data;
418                 return 0; /* EOTable */
/* Remember the mbuf we stepped out of, for unread. */
422             tab->last_mbuf = mbuf;
423             tab->cur_mblock->cur_mbuf = mbuf = mbuf->next;
425         mbuf->cur_record = 0;
429     memcpy(buf, mbuf->data + mbuf->offset + mbuf->cur_record *
430         is_keysize(tab->is), is_keysize(tab->is));
436  * TODO: optimize this function by introducing a higher-level search.
/* Linear scan forward until a record >= 'rec' (per the table's cmp
 * callback) is found; that record is then pushed back with
 * is_m_unread_record so the cursor rests just before it. Return value
 * and loop structure are partly on lines not visible in this view. */
438 int is_m_seek_record(is_mtable *tab, const void *rec)
440     char peek[IS_MAX_RECORD];
445         if (is_m_read_record(tab, &peek, 1) <= 0)
447         if ((rs = (*tab->is->cmp)(peek, rec)) > 0)
/* Overshot (or matched?) — step back so caller sees this record. */
449             is_m_unread_record(tab);
/* Return the total record count, faulting in the head block first if
 * it has never been read (count is -1 / unknown until then). Logs a
 * fatal error if the read fails; the error-path return is on lines not
 * visible in this view. */
457 int is_m_num_records(is_mtable *tab)
459     if (tab->data->state < IS_MBSTATE_PARTIAL)
460         if (read_current_full(tab, tab->data) < 0)
462             logf (LOG_FATAL, "read full failed");
465     return tab->num_records;