2 * Copyright (C) 1994, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.9 1995-12-06 15:48:46 quinn
8 * Fixed update-problem.
10 * Revision 1.8 1995/12/06 14:48:27 quinn
11 * Fixed some strange bugs.
13 * Revision 1.7 1995/12/06 09:59:46 quinn
14 * Fixed memory-consumption bug in memory.c
15 * Added more blocksizes to the default ISAM configuration.
17 * Revision 1.6 1995/09/04 12:33:47 adam
18 * Various cleanup. YAZ util used instead.
20 * Revision 1.5 1994/09/28 16:58:33 quinn
23 * Revision 1.4 1994/09/27 20:03:52 quinn
24 * Seems relatively bug-free.
26 * Revision 1.3 1994/09/26 17:11:30 quinn
29 * Revision 1.2 1994/09/26 17:06:35 quinn
32 * Revision 1.1 1994/09/26 16:07:56 quinn
33 * Most of the functionality in place.
38 * This module accesses and rearranges the records of the tables.
/* Payload size in bytes for each of the three mbuf size classes;
 * index 0 is a header-only class with no payload of its own. */
47 int is_mbuf_size[3] = { 0, 1024, 4096 };
/* Free-list heads for recycled mblocks and mbufs.  mblock_tmplist
 * holds blocks parked by xrelease_mblock until the table is released.
 * NOTE(review): file-scope mutable state — not thread-safe. */
49 static is_mblock *mblock_tmplist = 0, *mblock_freelist = 0;
50 static is_mbuf *mbuf_freelist[3] = {0, 0, 0};
/* Number of is_mblock structures allocated per batch when the
 * mblock free list runs dry. */
52 #define MALLOC_CHUNK 20
/*
 * Pop one is_mblock off the free list, refilling the list with a
 * batch of MALLOC_CHUNK blocks from xmalloc when it is empty.
 * The returned block is marked IS_MBSTATE_UNREAD.
 * NOTE(review): several source lines are missing from this view
 * (local declarations, the empty-list test, the return statement);
 * comments below cover the visible lines only.
 */
54 is_mblock *xmalloc_mblock()
/* Refill path: allocate a chunk and chain its entries together. */
61 mblock_freelist = xmalloc(sizeof(is_mblock) * MALLOC_CHUNK);
62 for (i = 0; i < MALLOC_CHUNK - 1; i++)
63 mblock_freelist[i].next = &mblock_freelist[i+1];
/* i == MALLOC_CHUNK - 1 after the loop: terminate the chain. */
64 mblock_freelist[i].next = 0;
/* Pop the head of the free list and hand it out as unread. */
66 tmp = mblock_freelist;
67 mblock_freelist = mblock_freelist->next;
69 tmp->state = IS_MBSTATE_UNREAD;
/*
 * Obtain an is_mbuf of the given size class: reuse one from the
 * per-type free list when available, otherwise xmalloc the header
 * plus is_mbuf_size[type] payload bytes as a single allocation.
 * NOTE(review): local declarations, braces and the return statement
 * are missing from this view.
 */
74 is_mbuf *xmalloc_mbuf(int type)
/* Fast path: recycle a buffer of the right class. */
78 if (mbuf_freelist[type])
80 tmp = mbuf_freelist[type];
81 mbuf_freelist[type] = tmp->next;
/* Slow path: fresh allocation (header and payload contiguous). */
85 tmp = xmalloc(sizeof(is_mbuf) + is_mbuf_size[type]);
/* Header-only class (type 0) starts unreferenced; payload classes
 * start with a single reference. */
88 tmp->refcount = type ? 1 : 0;
89 tmp->offset = tmp->num = tmp->cur_record = 0;
/* Payload lives immediately after the header in the same block. */
90 tmp->data = (char*) tmp + sizeof(is_mbuf);
/*
 * Return a single mbuf to the free list for its size class.
 * Buffers are recycled, never handed back to the allocator.
 */
95 void xfree_mbuf(is_mbuf *p)
97 p->next = mbuf_freelist[p->type];
98 mbuf_freelist[p->type] = p;
/*
 * Release an entire chain of mbufs.
 * NOTE(review): the body is not visible in this view; presumably it
 * walks 'l' calling xfree_mbuf per node — confirm against full file.
 */
101 void xfree_mbufs(is_mbuf *l)
/*
 * Free one mblock: release its mbuf chain and push the block itself
 * onto the mblock free list.
 * NOTE(review): the line storing p back into mblock_freelist appears
 * to be missing from this view.
 */
113 void xfree_mblock(is_mblock *p)
115 xfree_mbufs(p->data);
116 p->next = mblock_freelist;
/*
 * Park an mblock on the temporary list (mblock_tmplist) instead of
 * the free list — presumably so its buffers remain valid until
 * is_m_release_tab flushes the tmplist; confirm against full file.
 * NOTE(review): the line storing p into mblock_tmplist is missing
 * from this view.
 */
120 void xrelease_mblock(is_mblock *p)
122 p->next = mblock_tmplist;
/*
 * Free a chain of mblocks.
 * NOTE(review): the body is not visible in this view; presumably it
 * walks 'l' calling xfree_mblock per node — confirm against full file.
 */
126 void xfree_mblocks(is_mblock *l)
/*
 * Initialise the in-memory table handle 'tab' for isam position 'pos'.
 * Two setups are visible: an existing on-disk table (head mblock
 * marked UNREAD, disk position recorded, record count unknown) and a
 * brand-new table (empty CLEAN mblock with one large mbuf ready for
 * writing).  NOTE(review): the if/else lines that choose between the
 * two branches, and the trailing assignments, are missing from this
 * view.
 */
138 void is_m_establish_tab(ISAM is, is_mtable *tab, ISAM_P pos)
/* Head of the table's in-memory mblock chain. */
140 tab->data = xmalloc_mblock();
/* Existing table: contents stay on disk until first read; -1 marks
 * the record counts as not-yet-known. */
143 tab->pos_type = is_type(pos);
144 tab->num_records = -1;
145 tab->data->num_records = -1;
146 tab->data->diskpos = is_block(pos);
147 tab->data->state = IS_MBSTATE_UNREAD;
149 tab->cur_mblock = tab->data;
150 tab->cur_mblock->cur_mbuf = 0;
/* New table: empty and CLEAN, no disk position (-1), with a large
 * mbuf allocated up front and the cursor at record 0. */
155 tab->num_records = 0;
156 tab->data->num_records = 0;
157 tab->data->diskpos = -1;
158 tab->data->state = IS_MBSTATE_CLEAN;
159 tab->data->data = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
160 tab->cur_mblock = tab->data;
161 tab->cur_mblock->cur_mbuf = tab->data->data;
162 tab->cur_mblock->cur_mbuf->cur_record = 0;
/*
 * Release all memory held by the table: its own mblock chain plus any
 * blocks parked on the global tmplist by xrelease_mblock.
 * NOTE(review): trailing lines (e.g. resetting mblock_tmplist to 0)
 * are missing from this view.
 */
167 void is_m_release_tab(is_mtable *tab)
169 xfree_mblocks(tab->data);
170 xfree_mblocks(mblock_tmplist);
/*
 * Reset the table's read cursor to the first record of the first
 * mblock.  NOTE(review): guard lines (likely null checks around the
 * cur_mbuf/cur_record resets) are missing from this view.
 */
174 void is_m_rewind(is_mtable *tab)
176 tab->cur_mblock = tab->data;
179 tab->data->cur_mbuf = tab->data->data;
181 tab->data->data->cur_record = 0;
/*
 * Load the full contents of 'mblock' from disk via is_p_read_full;
 * if the on-disk chain continues (nextpos set) and no successor is
 * linked yet, prime an UNREAD successor mblock.  Resets the block's
 * read cursor to its first record.
 * NOTE(review): the error-return after the failed read and the final
 * return statement are missing from this view; failure is presumably
 * propagated as a negative value — confirm against full file.
 */
185 static int read_current_full(is_mtable *tab, is_mblock *mblock)
187 if (is_p_read_full(tab, mblock) < 0)
/* Chain continues on disk but is not yet represented in memory:
 * create a placeholder block to be read on demand. */
189 if (mblock->nextpos && !mblock->next)
191 mblock->next = xmalloc_mblock();
192 mblock->next->diskpos = mblock->nextpos;
193 mblock->next->state = IS_MBSTATE_UNREAD;
194 mblock->next->data = 0;
/* Rewind the freshly loaded block. */
196 mblock->cur_mbuf = mblock->data;
197 mblock->data->cur_record = 0;
/* Public wrapper around read_current_full (see above). */
201 int is_m_read_full(is_mtable *tab, is_mblock *mblock)
203 return read_current_full(tab, mblock);
207 * replace the record right behind the pointer.
/*
 * Overwrite the record most recently read (cursor minus one) with
 * 'rec', and mark the current mblock dirty so it gets flushed.
 */
209 void is_m_replace_record(is_mtable *tab, const void *rec)
211 is_mbuf *mbuf = tab->cur_mblock->cur_mbuf;
213 /* we assume that block is already in memory and that we are in the
214 * right mbuf, and that it has space for us. */
/* cur_record - 1: the cursor has already advanced past the record
 * being replaced. */
215 memcpy(mbuf->data + mbuf->offset + (mbuf->cur_record - 1) *
216 is_keysize(tab->is), rec, is_keysize(tab->is));
217 tab->cur_mblock->state = IS_MBSTATE_DIRTY;
221 * Delete the record right behind the pointer.
/*
 * Remove the record most recently read.  When the cursor sits in the
 * middle of an mbuf, the mbuf is split: a new small mbuf aliases the
 * tail records after the deleted one, and the current mbuf's count is
 * trimmed to exclude it.  The mblock's record count is decremented
 * and both it and the head block are marked dirty.
 * NOTE(review): the top-of-mbuf branch body and several linking lines
 * are missing from this view — comments cover visible lines only.
 */
223 void is_m_delete_record(is_mtable *tab)
227 mbuf = tab->cur_mblock->cur_mbuf;
228 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
233 else /* middle of a block */
/* Split: 'new' shares the old mbuf's data (reference, not a copy)
 * and covers the records after the deleted one. */
235 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
236 new->next = mbuf->next;
238 new->data = mbuf->data;
240 new->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
241 new->num = mbuf->num - mbuf->cur_record;
/* Truncate the current mbuf so the deleted record falls in the gap. */
242 mbuf->num = mbuf->cur_record -1;
244 mbuf->cur_record = 0;
247 tab->cur_mblock->num_records--;
/* Head block state is updated too so the table is known-dirty. */
248 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/*
 * Insert 'rec' at the current cursor position.  Ensures the current
 * mblock is fully in memory first.  At the end of an mbuf that is
 * shared (refcount != 1) or full, a fresh large mbuf is linked in;
 * in the middle of an mbuf, the mbuf is split around the insertion
 * point (medium mbuf for the new record, small alias mbuf for the
 * tail).  The record is then copied in and the block marked dirty.
 * NOTE(review): several branch/brace/return lines are missing from
 * this view — comments cover the visible lines only, and the return
 * convention (presumably negative on read failure) is not visible.
 */
251 int is_m_write_record(is_mtable *tab, const void *rec)
253 is_mbuf *mbuf, *oldnext, *dmbuf;
255 /* make sure block is all in memory */
256 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
257 if (read_current_full(tab, tab->cur_mblock) < 0)
259 mbuf = tab->cur_mblock->cur_mbuf;
260 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
262 /* mbuf is reference or full */
/* Can't append in place: shared buffer or no room for one more key. */
263 if (mbuf->refcount != 1 || mbuf->offset + (mbuf->num + 1) *
264 is_keysize(tab->is) > is_mbuf_size[mbuf->type])
/* Link a fresh large mbuf after the current one. */
266 oldnext = mbuf->next;
267 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
268 mbuf->next->next = oldnext;
270 tab->cur_mblock->cur_mbuf = mbuf;
271 mbuf->cur_record = 0;
/* Mid-mbuf insert: medium mbuf takes the new record, small mbuf
 * 'dmbuf' aliases the tail records of the original buffer. */
276 oldnext = mbuf->next;
277 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_MEDIUM);
278 mbuf->next->next = dmbuf = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
279 dmbuf->data = mbuf->data;
280 dmbuf->next = oldnext;
281 dmbuf->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
282 dmbuf->num = mbuf->num - mbuf->cur_record;
283 mbuf->num -= dmbuf->num;
/* Continue writing into the newly inserted mbuf. */
285 mbuf = tab->cur_mblock->cur_mbuf = mbuf->next;
286 mbuf->cur_record = 0;
288 logf (LOG_DEBUG, "is_m_write_rec(rec == %d)", mbuf->cur_record);
289 memcpy(mbuf->data + mbuf->offset + mbuf->cur_record * is_keysize(tab->is),
290 rec, is_keysize(tab->is));
294 tab->cur_mblock->num_records++;
295 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/*
 * Step the read cursor back by one record within the current mbuf.
 * Asserts the cursor is not already at the start of the mbuf.
 */
299 void is_m_unread_record(is_mtable *tab)
301 assert(tab->cur_mblock->cur_mbuf->cur_record);
302 tab->cur_mblock->cur_mbuf->cur_record--;
306 * non-destructive read.
/*
 * Copy the record at the cursor into 'rec' WITHOUT advancing the
 * table's cursor — end-of-mbuf/mblock traversal here uses local
 * mblock/mbuf variables.  Returns 0 at end of table; the success
 * return and several traversal lines are missing from this view.
 */
308 int is_m_peek_record(is_mtable *tab, void *rec)
313 /* make sure block is all in memory */
314 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
315 if (read_current_full(tab, tab->cur_mblock) < 0)
317 mblock = tab->cur_mblock;
318 mbuf = mblock->cur_mbuf;
319 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
321 if (!mbuf->next) /* end of mblock */
/* Peek into the next mblock, loading it on demand. */
325 mblock = mblock->next;
326 if (mblock->state <= IS_MBSTATE_PARTIAL)
327 if (read_current_full(tab, mblock) < 0)
332 return 0; /* EOTable */
336 mbuf->cur_record = 0;
338 memcpy(rec, mbuf->data + mbuf->offset + mbuf->cur_record *
339 is_keysize(tab->is), is_keysize(tab->is));
/*
 * Copy the record at the cursor into 'buf' and advance the cursor,
 * crossing mbuf and mblock boundaries as needed.  When 'keep' is
 * false, a fully-read CLEAN block that is backed by disk has its
 * buffers released and is demoted to UNREAD to cap memory use.
 * Returns 0 at end of table; the success-return, cursor-increment and
 * several brace lines are missing from this view.
 */
343 int is_m_read_record(is_mtable *tab, void *buf, int keep)
347 /* make sure block is all in memory */
348 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
349 if (read_current_full(tab, tab->cur_mblock) < 0)
351 mbuf = tab->cur_mblock->cur_mbuf;
352 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
354 if (!mbuf->next) /* end of mblock */
/* Memory-saving path: discard buffers of a clean, disk-backed
 * block we have finished reading; it can be re-read on demand. */
356 if (!keep && tab->cur_mblock->state == IS_MBSTATE_CLEAN &&
357 tab->cur_mblock->diskpos > 0)
359 xfree_mbufs(tab->cur_mblock->data);
360 tab->cur_mblock->data = 0;
361 tab->cur_mblock->state = IS_MBSTATE_UNREAD;
/* Advance to the next mblock, loading it on demand. */
363 if (tab->cur_mblock->next)
365 tab->cur_mblock = tab->cur_mblock->next;
366 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
367 if (read_current_full(tab, tab->cur_mblock) < 0)
369 tab->cur_mblock->cur_mbuf = mbuf = tab->cur_mblock->data;
372 return 0; /* EOTable */
/* End of mbuf but not of mblock: step into the next mbuf. */
375 tab->cur_mblock->cur_mbuf = mbuf = mbuf->next;
376 mbuf->cur_record = 0;
378 memcpy(buf, mbuf->data + mbuf->offset + mbuf->cur_record *
379 is_keysize(tab->is), is_keysize(tab->is));
385 * TODO: optimize this function by introducing a higher-level search.
/*
 * Linear-scan the table forward until a record comparing >= 'rec' is
 * found (per the table's cmp callback), then unread so the cursor
 * points at it.  NOTE(review): the loop construct, the equal/less
 * handling and the return statements are missing from this view;
 * return convention not visible.
 */
387 int is_m_seek_record(is_mtable *tab, const void *rec)
389 char peek[IS_MAX_RECORD];
/* keep == 1: do not discard buffers while scanning. */
394 if (is_m_read_record(tab, &peek, 1) <= 0)
/* Overshot: step back so the cursor rests on the first record
 * that compares greater than 'rec'. */
396 if ((rs = (*tab->is->cmp)(peek, rec)) > 0)
398 is_m_unread_record(tab);
/*
 * Return the table's record count, loading the head block from disk
 * first if it has not been read yet (count is unknown until then).
 * Logs a fatal error if the read fails.  NOTE(review): the error-path
 * exit between the logf and the return is missing from this view.
 */
406 int is_m_num_records(is_mtable *tab)
408 if (tab->data->state < IS_MBSTATE_PARTIAL)
409 if (read_current_full(tab, tab->data) < 0)
411 logf (LOG_FATAL, "read full failed");
414 return tab->num_records;