2 * Copyright (C) 1995, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.13 1996-10-29 13:56:16 adam
8 * Include of zebrautl.h instead of alexutil.h.
10 * Revision 1.12 1996/04/24 13:29:16 adam
11 * Work on optimized on commit operation.
13 * Revision 1.11 1996/04/23 12:36:41 adam
14 * Started work on more efficient commit operation.
16 * Revision 1.10 1996/04/18 16:02:56 adam
17 * Changed logging a bit.
18 * Removed warning message when committing flat shadow files.
20 * Revision 1.9 1996/04/12 07:01:57 adam
21 * Yet another bug fix (next_block was initialized to 0; now set to 1).
23 * Revision 1.8 1996/02/07 14:03:49 adam
24 * Work on flat indexed shadow files.
26 * Revision 1.7 1996/02/07 10:08:46 adam
27 * Work on flat shadow (not finished yet).
29 * Revision 1.6 1995/12/15 12:36:53 adam
30 * Moved hash file information to union.
31 * Renamed commit files.
33 * Revision 1.5 1995/12/12 15:57:55 adam
34 * Implemented mf_unlink. cf_unlink uses mf_unlink.
36 * Revision 1.4 1995/12/11 09:03:55 adam
37 * New function: cf_unlink.
38 * New member of commit file head: state (0) deleted, (1) hash file.
40 * Revision 1.3 1995/12/01 16:24:29 adam
41 * Commit files use separate meta file area.
43 * Revision 1.2 1995/12/01 11:37:24 adam
44 * Cached/commit files implemented as meta-files.
46 * Revision 1.1 1995/11/30 08:33:13 adam
47 * Started work on commit facility.
/* When non-zero, commit operations batch block copies through the
 * map_cache below instead of copying one block at a time.
 * Currently disabled. */
58 #define CF_OPTIMIZE_COMMIT 0
/* cf_unlink: remove the meta-files backing a commit/shadow CFile.
 * Refuses (fatal log) while hash buckets are still cached in memory,
 * since unlinking then could drop dirty data.
 * NOTE(review): this extract is truncated -- braces and the
 * error-exit path after the fatal log are not visible here. */
60 void cf_unlink (CFile cf)
62 if (cf->bucket_in_memory)
64 logf (LOG_FATAL, "Cannot unlink potential dirty cache");
/* Remove both underlying meta-files: data blocks and the hash index. */
69 mf_unlink (cf->block_mf);
70 mf_unlink (cf->hash_mf);
74 #if CF_OPTIMIZE_COMMIT
/* map_cache_entity: one pending block copy, mapping a position in the
 * commit (shadow) block file to its target position.
 * NOTE(review): the struct bodies are truncated in this extract; only
 * fragments are visible.  'map' below appears to be the pending-copy
 * array inside an enclosing 'struct map_cache' whose declaration is
 * not in view -- confirm against the full source. */
75 struct map_cache_entity {
84 struct map_cache_entity *map;
/* map_cache_init: allocate a map_cache for 'cf'.
 * Sizes the cache from a fixed memory budget: 'buf' holds up to
 * mem_max bytes of block data and 'map' holds one entry per block
 * that fits in that budget (mem_max / block_size).
 * NOTE(review): truncated -- initialization of the remaining fields
 * and the return statement are not visible here. */
89 static struct map_cache *map_cache_init (CFile cf)
/* Fixed ~2 MB budget for buffered block data. */
91 int mem_max = 2000000;
92 struct map_cache *m_p;
94 m_p = xmalloc (sizeof(*m_p));
96 m_p->max = mem_max / cf->head.block_size;
97 m_p->buf = xmalloc (mem_max);
99 m_p->map = xmalloc (sizeof(*m_p->map) * m_p->max);
/* qsort comparator: order map_cache entries by ascending 'from'
 * (position in the commit block file), so flushes read the shadow
 * file sequentially.  Subtraction is safe here as long as block
 * numbers are non-negative ints -- TODO confirm in full source. */
103 static int map_cache_cmp_from (const void *p1, const void *p2)
105 return ((struct map_cache_entity*) p1)->from -
106 ((struct map_cache_entity*) p2)->from;
/* qsort comparator: order map_cache entries by ascending 'to'
 * (target position), so flushes write the destination file
 * sequentially.  Same overflow caveat as map_cache_cmp_from. */
109 static int map_cache_cmp_to (const void *p1, const void *p2)
111 return ((struct map_cache_entity*) p1)->to -
112 ((struct map_cache_entity*) p2)->to;
/* map_cache_flush: perform all pending block copies in two sequential
 * passes.  Pass 1: sort by source position ('from') and read every
 * block from the commit block file into 'buf' in order, recording each
 * block's buffer index back into map[i].from.  Pass 2: sort by target
 * position ('to') and write the buffered blocks to the target
 * meta-file (cf->rmf) in order.  Sequential ordering in both passes is
 * the point of this cache -- it avoids random seeks during commit.
 * NOTE(review): truncated -- declarations of 'i', braces, the
 * error-exit after the fatal log, and the reset of m_p->no are not
 * visible in this extract. */
115 static void map_cache_flush (struct map_cache *m_p)
119 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_from);
/* Sanity: after sorting, 'from' positions must be strictly increasing
 * (duplicates would mean the same source block queued twice). */
120 assert (m_p->no < 2 || m_p->map[0].from < m_p->map[1].from);
121 for (i = 0; i<m_p->no; i++)
123 if (!mf_read (m_p->cf->block_mf, m_p->map[i].from, 0, 0,
124 m_p->buf + i * m_p->cf->head.block_size))
126 logf (LOG_FATAL, "read commit block at position %d",
/* Remember where in 'buf' this block landed, for pass 2. */
130 m_p->map[i].from = i;
132 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_to);
133 assert (m_p->no < 2 || m_p->map[0].to < m_p->map[1].to);
134 for (i = 0; i<m_p->no; i++)
136 mf_write (m_p->cf->rmf, m_p->map[i].to, 0, 0,
137 m_p->buf + m_p->map[i].from * m_p->cf->head.block_size);
/* map_cache_del: flush any pending copies, then release the cache.
 * NOTE(review): the xfree calls for map/buf/m_p are not visible in
 * this truncated extract -- confirm against the full source. */
142 static void map_cache_del (struct map_cache *m_p)
144 map_cache_flush (m_p);
/* map_cache_add: queue one block copy (from -> to).  Presumably
 * flushes first when the cache is full; the capacity check, the index
 * variable 'i', the assignment of map[i].to, and the increment of
 * m_p->no are not visible in this truncated extract -- confirm. */
150 static void map_cache_add (struct map_cache *m_p, int from, int to)
154 m_p->map[i].from = from;
158 map_cache_flush (m_p);
161 /* CF_OPTIMIZE_COMMIT */
/* cf_commit_hash: commit a hash-organized shadow file.  Walks every
 * hash bucket in the hash meta-file; each occupied slot i maps a
 * shadow block (p->vno[i], in the commit block file) to its home
 * position (p->no[i]) in the target meta-file cf->rmf.  With
 * CF_OPTIMIZE_COMMIT the copies are batched through a map_cache for
 * sequential I/O; otherwise each block is read and written directly.
 * NOTE(review): truncated extract -- braces, error exits, loop
 * variable declarations, #else/#endif lines, and the final xfree /
 * map_cache_del are not all visible here. */
164 static void cf_commit_hash (CFile cf)
168 struct CFile_ph_bucket *p;
169 #if CF_OPTIMIZE_COMMIT
170 struct map_cache *m_p;
173 #if CF_OPTIMIZE_COMMIT
174 m_p = map_cache_init (cf);
177 p = xmalloc (sizeof(*p));
178 hash_bytes = cf->head.hash_size * sizeof(int);
/* Scan all buckets currently in use: [first_bucket, next_bucket). */
179 bucket_no = cf->head.first_bucket;
180 for (; bucket_no < cf->head.next_bucket; bucket_no++)
182 if (!mf_read (cf->hash_mf, bucket_no, 0, 0, p))
184 logf (LOG_FATAL, "read commit hash");
/* Slots are packed; a zero vno terminates the bucket's entries. */
187 for (i = 0; i<HASH_BUCKET && p->vno[i]; i++)
189 #if CF_OPTIMIZE_COMMIT
190 map_cache_add (m_p, p->vno[i], p->no[i]);
/* Unoptimized path: copy the block immediately via iobuf. */
192 if (!mf_read (cf->block_mf, p->vno[i], 0, 0, cf->iobuf))
194 logf (LOG_FATAL, "read commit block");
197 mf_write (cf->rmf, p->no[i], 0, 0, cf->iobuf);
201 #if CF_OPTIMIZE_COMMIT
/* cf_commit_flat: commit a flat-indexed shadow file.  The flat index
 * occupies buckets [next_bucket, flat_bucket) of the hash meta-file;
 * each index block 'fp' holds HASH_BSIZE/sizeof(int) entries where
 * fp[i] is the shadow-block number to copy to target position 'vno'
 * (presumably derived from hno and i -- the computation is not
 * visible here).  As in cf_commit_hash, copies either go through the
 * map_cache (CF_OPTIMIZE_COMMIT) or are done block-by-block.
 * NOTE(review): truncated extract -- braces, error exits, the 'vno'
 * bookkeeping, #else/#endif lines and cleanup are not all visible. */
207 static void cf_commit_flat (CFile cf)
213 #if CF_OPTIMIZE_COMMIT
214 struct map_cache *m_p;
218 #if CF_OPTIMIZE_COMMIT
219 m_p = map_cache_init (cf);
221 fp = xmalloc (HASH_BSIZE);
222 for (hno = cf->head.next_bucket; hno < cf->head.flat_bucket; hno++)
/* Last index block may be partial: pre-clear it before reading. */
224 if (hno == cf->head.flat_bucket-1)
226 for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
/* A short read is fatal except on the (possibly partial) last block. */
229 if (!mf_read (cf->hash_mf, hno, 0, 0, fp) &&
230 hno != cf->head.flat_bucket-1)
232 logf (LOG_FATAL, "read index block hno=%d (%d-%d) commit",
233 hno, cf->head.next_bucket, cf->head.flat_bucket-1);
235 for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
239 #if CF_OPTIMIZE_COMMIT
240 map_cache_add (m_p, fp[i], vno);
/* Unoptimized path: copy the block immediately via iobuf. */
242 if (!mf_read (cf->block_mf, fp[i], 0, 0, cf->iobuf))
244 logf (LOG_FATAL, "read data block hno=%d (%d-%d) "
245 "i=%d commit block at %d (->%d)",
246 hno, cf->head.next_bucket, cf->head.flat_bucket-1,
250 mf_write (cf->rmf, vno, 0, 0, cf->iobuf);
257 #if CF_OPTIMIZE_COMMIT
/* cf_commit: entry point -- apply a shadow file's changes to the main
 * meta-file.  Refuses (fatal log) while buckets are still cached in
 * memory.  Dispatches on cf->head.state: 1 = hash-organized shadow,
 * 2 = flat-indexed shadow (presumably calling cf_commit_hash /
 * cf_commit_flat respectively -- the call lines fall outside this
 * extract, which ends here). */
263 void cf_commit (CFile cf)
266 if (cf->bucket_in_memory)
268 logf (LOG_FATAL, "Cannot commit potential dirty cache");
271 if (cf->head.state == 1)
273 else if (cf->head.state == 2)