2 * Copyright (C) 1994, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.5 1994-09-28 16:58:33 quinn
10 * Revision 1.4 1994/09/27 20:03:52 quinn
11 * Seems relatively bug-free.
13 * Revision 1.3 1994/09/26 17:11:30 quinn
16 * Revision 1.2 1994/09/26 17:06:35 quinn
19 * Revision 1.1 1994/09/26 16:07:56 quinn
20 * Most of the functionality in place.
25 * This module accesses and rearranges the records of the tables.
/* Byte capacities of the three mbuf pool types.  Index 0 has size 0 —
 * presumably the "small" type carries no data of its own and only
 * references another mbuf's buffer (see the refcount handling below);
 * TODO confirm against the IS_MBUF_TYPE_* definitions. */
33 int is_mbuf_size[3] = { 0, 1024, 4096 };

/* Module-global recycling lists: a temporary-release list and a free
 * list for mblocks, plus one free list per mbuf type. */
35 static is_mblock *mblock_tmplist = 0, *mblock_freelist = 0;
36 static is_mbuf *mbuf_freelist[3] = {0, 0, 0};

/* Number of is_mblock structures allocated per batch when the free
 * list runs dry (see xmalloc_mblock). */
38 #define MALLOC_CHUNK 20
/*
 * Pop one is_mblock off the global free list, replenishing the list
 * first with a contiguous batch of MALLOC_CHUNK entries when it is
 * empty.  The returned block's state is initialized to
 * IS_MBSTATE_UNREAD.
 * NOTE(review): this view of the file is elided — local declarations,
 * braces and the return statement are not visible; comments describe
 * only the lines shown.
 */
40 is_mblock *xmalloc_mblock()
/* Free list empty: grab a chunk and thread its entries into a list. */
47 	mblock_freelist = xmalloc(sizeof(is_mblock) * MALLOC_CHUNK);
48 	for (i = 0; i < MALLOC_CHUNK - 1; i++)
49 	    mblock_freelist[i].next = &mblock_freelist[i+1];
50 	mblock_freelist[i].next = 0;
/* Detach the head entry for the caller. */
52 	tmp = mblock_freelist;
53 	mblock_freelist = mblock_freelist->next;
55 	tmp->state = IS_MBSTATE_UNREAD;
/*
 * Get an mbuf of the given type, reusing one from the per-type free
 * list when possible, otherwise allocating header + data area in one
 * xmalloc.  Cursor fields are zeroed and the data pointer is aimed at
 * the area immediately following the header.
 * NOTE(review): elided view — braces/else/return not visible.
 * The refcount is set to 0 for type 0 and 1 otherwise; presumably
 * type-0 ("small") mbufs alias another mbuf's data — TODO confirm.
 */
60 is_mbuf *xmalloc_mbuf(int type)
64     if (mbuf_freelist[type])
/* Reuse a recycled mbuf of this type. */
66 	tmp = mbuf_freelist[type];
67 	mbuf_freelist[type] = tmp->next;
/* Fresh allocation: header and data share one memory region. */
71 	tmp = xmalloc(sizeof(is_mbuf) + is_mbuf_size[type]);
74     tmp->refcount = type ? 1 : 0;
75     tmp->offset = tmp->num = tmp->cur_record = 0;
76     tmp->data = (char*) tmp + sizeof(is_mbuf);
/*
 * Return a single mbuf to the free list of its type.  The memory is
 * never released back to the system, only recycled.
 */
81 void xfree_mbuf(is_mbuf *p)
83     p->next = mbuf_freelist[p->type];
84     mbuf_freelist[p->type] = p;
/*
 * Release an entire chain of mbufs (following the next pointers),
 * presumably via xfree_mbuf for each element and honoring refcounts
 * on shared data — body not visible in this view; TODO confirm.
 */
87 void xfree_mbufs(is_mbuf *l)
/*
 * Return an mblock to the global free list, first releasing its chain
 * of mbufs.  (The final free-list head update is elided from this
 * view.)
 */
99 void xfree_mblock(is_mblock *p)
101     xfree_mbufs(p->data);
102     p->next = mblock_freelist;
/*
 * Park an mblock on the temporary list rather than the free list —
 * presumably so its contents stay valid until is_m_release_tab flushes
 * mblock_tmplist; TODO confirm against callers.
 */
106 void xrelease_mblock(is_mblock *p)
108     p->next = mblock_tmplist;
/*
 * Release a whole chain of mblocks (following next pointers) back to
 * the free list — body not visible in this view; presumably iterates
 * calling xfree_mblock.  TODO confirm.
 */
112 void xfree_mblocks(is_mblock *l)
/*
 * Initialize an in-memory table descriptor for ISAM position 'pos'.
 * Two setups are visible: one treats the first mblock as an UNREAD
 * placeholder for an existing on-disk block (num_records unknown,
 * flagged -1); the other builds a fresh empty table — CLEAN state, no
 * disk position (-1), and one pre-allocated large mbuf.
 * NOTE(review): the branch condition selecting between the two setups
 * (presumably whether pos is nonzero) is elided from this view —
 * TODO confirm.
 */
124 void is_m_establish_tab(ISAM is, is_mtable *tab, ISAM_P pos)
126     tab->data = xmalloc_mblock();
/* Existing table: decode type/block from pos; contents not yet read. */
129 	tab->pos_type = is_type(pos);
130 	tab->num_records = -1;      /* unknown until block is read */
131 	tab->data->num_records = -1;
132 	tab->data->diskpos = is_block(pos);
133 	tab->data->state = IS_MBSTATE_UNREAD;
135 	tab->cur_mblock = tab->data;
136 	tab->cur_mblock->cur_mbuf = 0;
/* New table: empty, clean, not yet assigned a disk block. */
141 	tab->num_records = 0;
142 	tab->data->num_records = 0;
143 	tab->data->diskpos = -1;
144 	tab->data->state = IS_MBSTATE_CLEAN;
145 	tab->data->data = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
146 	tab->cur_mblock = tab->data;
147 	tab->cur_mblock->cur_mbuf = tab->data->data;
148 	tab->cur_mblock->cur_mbuf->cur_record = 0;
/*
 * Free all memory held by a table: its mblock chain plus anything
 * parked on the module-global temporary list by xrelease_mblock.
 */
153 void is_m_release_tab(is_mtable *tab)
155     xfree_mblocks(tab->data);
156     xfree_mblocks(mblock_tmplist);
/*
 * Reset the read/write cursor to the first record of the first mblock.
 * NOTE(review): elided view — the mbuf/record resets at lines 165/167
 * are presumably guarded by null checks on tab->data and its mbuf
 * chain; the guards are not visible here.
 */
160 void is_m_rewind(is_mtable *tab)
162     tab->cur_mblock = tab->data;
165 	tab->data->cur_mbuf = tab->data->data;
167 	    tab->data->data->cur_record = 0;
/*
 * Pull the given mblock fully into memory via is_p_read_full.  If the
 * block names a successor on disk (nextpos) that is not yet chained in
 * memory, an UNREAD placeholder mblock is appended for it.  Resets the
 * mblock's cursor to its first mbuf/record.
 * NOTE(review): elided view — error handling after the failed
 * is_p_read_full (line 173) and the return value are not visible.
 */
171 static int read_current_full(is_mtable *tab, is_mblock *mblock)
173     if (is_p_read_full(tab, mblock) < 0)
/* Chain in a placeholder for the next on-disk block, if any. */
175     if (mblock->nextpos && !mblock->next)
177 	mblock->next = xmalloc_mblock();
178 	mblock->next->diskpos = mblock->nextpos;
179 	mblock->next->state = IS_MBSTATE_UNREAD;
180 	mblock->next->data = 0;
/* Position the cursor at the start of the freshly read block. */
182     mblock->cur_mbuf = mblock->data;
183     mblock->data->cur_record = 0;
/*
 * Public wrapper around read_current_full — forces a full read of the
 * given mblock.
 */
187 int is_m_read_full(is_mtable *tab, is_mblock *mblock)
189     return read_current_full(tab, mblock);
193  * replace the record right behind the pointer.
/*
 * Overwrite the most recently read record (cursor minus one) in place
 * and mark the current mblock dirty.  Precondition per the original
 * comment: the block is in memory, the cursor sits in the right mbuf,
 * and cur_record >= 1 — no checks are performed here.
 */
195 void is_m_replace_record(is_mtable *tab, const void *rec)
197     is_mbuf *mbuf = tab->cur_mblock->cur_mbuf;
199     /* we assume that block is already in memory and that we are in the
200      * right mbuf, and that it has space for us. */
201     memcpy(mbuf->data + mbuf->offset + (mbuf->cur_record - 1) *
202 	is_keysize(tab->is), rec, is_keysize(tab->is));
203     tab->cur_mblock->state = IS_MBSTATE_DIRTY;
207 * Delete the record right behind the pointer.
209 void is_m_delete_record(is_mtable *tab)
213 mbuf = tab->cur_mblock->cur_mbuf;
214 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
219 else /* middle of a block */
221 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
222 new->next = mbuf->next;
224 new->data = mbuf->data;
226 new->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
227 new->num = mbuf->num - mbuf->cur_record;
228 mbuf->num = mbuf->cur_record -1;
230 mbuf->cur_record = 0;
233 tab->cur_mblock->num_records--;
234 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/*
 * Insert a record at the current cursor position.  Ensures the block
 * is fully in memory first.  Three visible cases:
 *   - cursor at the top of an mbuf that is full or whose data is
 *     shared (refcount != 1): append a fresh large mbuf and write
 *     there;
 *   - cursor mid-mbuf: split into a new medium mbuf (for the insert)
 *     plus a small mbuf aliasing the tail of the original buffer;
 *   - otherwise (elided) presumably write in place.
 * Updates record counters and marks block and table head dirty.
 * NOTE(review): elided view — several braces, the in-place path, the
 * cursor advance after memcpy, and the return value are not visible.
 */
237 int is_m_write_record(is_mtable *tab, const void *rec)
239     is_mbuf *mbuf, *oldnext, *dmbuf;
241     /* make sure block is all in memory */
242     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
243 	if (read_current_full(tab, tab->cur_mblock) < 0)
245     mbuf = tab->cur_mblock->cur_mbuf;
246     if (mbuf->cur_record >= mbuf->num)     /* top of mbuf */
248 	/* mbuf is reference or full */
249 	if (mbuf->refcount != 1 || mbuf->offset + (mbuf->num + 1) *
250 	    is_keysize(tab->is) > is_mbuf_size[mbuf->type])
/* Can't extend in place: link in a fresh large mbuf after this one. */
252 	    oldnext = mbuf->next;
253 	    mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
254 	    mbuf->next->next = oldnext;
256 	    tab->cur_mblock->cur_mbuf = mbuf;
257 	    mbuf->cur_record = 0;
/* Mid-mbuf insert: medium mbuf for new data, small one aliases the tail. */
262 	oldnext = mbuf->next;
263 	mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_MEDIUM);
264 	mbuf->next->next = dmbuf = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
265 	dmbuf->data = mbuf->data;      /* shares the original buffer */
266 	dmbuf->next = oldnext;
267 	dmbuf->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
268 	dmbuf->num = mbuf->num - mbuf->cur_record;
269 	mbuf->num -= dmbuf->num;
271 	mbuf = tab->cur_mblock->cur_mbuf = mbuf->next;
272 	mbuf->cur_record = 0;
274     log(LOG_DEBUG, "is_m_write_rec(rec == %d)", mbuf->cur_record);
275     memcpy(mbuf->data + mbuf->offset + mbuf->cur_record * is_keysize(tab->is),
276 	rec, is_keysize(tab->is));
280     tab->cur_mblock->num_records++;
281     tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/*
 * Step the cursor back one record within the current mbuf, undoing the
 * last read.  Asserts the cursor is not already at the start of the
 * mbuf — stepping back across an mbuf boundary is not supported here.
 */
285 void is_m_unread_record(is_mtable *tab)
287     assert(tab->cur_mblock->cur_mbuf->cur_record);
288     tab->cur_mblock->cur_mbuf->cur_record--;
292  * non-destructive read.
/*
 * Copy the record at the cursor into 'rec' without permanently
 * advancing the table cursor.  Walks ahead through mbuf/mblock
 * boundaries on local variables (mblock/mbuf) only, faulting in
 * unread blocks as needed.  Returns 0 at end of table; the success
 * return is on an elided line — presumably 1, TODO confirm.
 * NOTE(review): elided view — local declarations, several braces and
 * the mbuf-advance between lines 318 and 322 are not visible.
 */
294 int is_m_peek_record(is_mtable *tab, void *rec)
299     /* make sure block is all in memory */
300     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
301 	if (read_current_full(tab, tab->cur_mblock) < 0)
303     mblock = tab->cur_mblock;
304     mbuf = mblock->cur_mbuf;
305     if (mbuf->cur_record >= mbuf->num)     /* are we at end of mbuf? */
307 	if (!mbuf->next)   /* end of mblock */
/* Move to the next mblock, reading it in if necessary. */
311 	    mblock = mblock->next;
312 	    if (mblock->state <= IS_MBSTATE_PARTIAL)
313 		if (read_current_full(tab, mblock) < 0)
318 	    return 0;      /* EOTable */
322 	mbuf->cur_record = 0;
324     memcpy(rec, mbuf->data + mbuf->offset + mbuf->cur_record *
325 	is_keysize(tab->is), is_keysize(tab->is));
/*
 * Sequential destructive read: copy the record at the cursor into
 * 'buf' and advance, crossing mbuf and mblock boundaries as needed
 * (faulting in unread blocks).  Returns 0 at end of table; the success
 * return and the cursor increment after the memcpy are on elided
 * lines — presumably returns 1, TODO confirm.
 */
329 int is_m_read_record(is_mtable *tab, void *buf)
333     /* make sure block is all in memory */
334     if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
335 	if (read_current_full(tab, tab->cur_mblock) < 0)
337     mbuf = tab->cur_mblock->cur_mbuf;
338     if (mbuf->cur_record >= mbuf->num)     /* are we at end of mbuf? */
340 	if (!mbuf->next)   /* end of mblock */
342 	    if (tab->cur_mblock->next)
/* Advance the table cursor into the next mblock. */
344 		tab->cur_mblock = tab->cur_mblock->next;
345 		if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
346 		    if (read_current_full(tab, tab->cur_mblock) < 0)
348 		tab->cur_mblock->cur_mbuf = mbuf = tab->cur_mblock->data;
351 		return 0;  /* EOTable */
/* Advance within the block to the next mbuf. */
354 	    tab->cur_mblock->cur_mbuf = mbuf = mbuf->next;
355 	mbuf->cur_record = 0;
357     memcpy(buf, mbuf->data + mbuf->offset + mbuf->cur_record *
358 	is_keysize(tab->is), is_keysize(tab->is));
364  * TODO: optimize this function by introducing a higher-level search.
/*
 * Linear search: read records forward until one compares >= 'rec'
 * (per the table's cmp callback), then push the overshooting record
 * back with is_m_unread_record so the cursor rests at the insertion
 * point.  NOTE(review): loop structure, the cmp == 0 handling and the
 * return value are on elided lines — TODO confirm semantics.
 */
366 int is_m_seek_record(is_mtable *tab, const void *rec)
368     char peek[IS_MAX_RECORD];
373 	if (is_m_read_record(tab, &peek) <= 0)
375 	if ((rs = (*tab->is->cmp)(peek, rec)) > 0)
377 	    is_m_unread_record(tab);
/*
 * Return the total record count for the table.  If the first mblock
 * has not been read yet (state below PARTIAL, i.e. the count is still
 * the -1 placeholder), force a full read first; a read failure is
 * logged as fatal (the failure-path lines around 390 are elided).
 */
385 int is_m_num_records(is_mtable *tab)
387     if (tab->data->state < IS_MBSTATE_PARTIAL)
388 	if (read_current_full(tab, tab->data) < 0)
390 	    log(LOG_FATAL, "read full failed");
393     return tab->num_records;