2 * Copyright (C) 1995-1998, Index Data
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.15 1999-05-26 07:49:12 adam
10 * Revision 1.14 1998/08/07 15:07:16 adam
11 * Fixed bug in cf_commit_flat.
13 * Revision 1.13 1996/10/29 13:56:16 adam
14 * Include of zebrautl.h instead of alexutil.h.
16 * Revision 1.12 1996/04/24 13:29:16 adam
17 * Work on optimized on commit operation.
19 * Revision 1.11 1996/04/23 12:36:41 adam
20 * Started work on more efficient commit operation.
22 * Revision 1.10 1996/04/18 16:02:56 adam
23 * Changed logging a bit.
24 * Removed warning message when committing flat shadow files.
26 * Revision 1.9 1996/04/12 07:01:57 adam
27 * Yet another bug fix (next_block was initialized to 0; now set to 1).
29 * Revision 1.8 1996/02/07 14:03:49 adam
30 * Work on flat indexed shadow files.
32 * Revision 1.7 1996/02/07 10:08:46 adam
33 * Work on flat shadow (not finished yet).
35 * Revision 1.6 1995/12/15 12:36:53 adam
36 * Moved hash file information to union.
37 * Renamed commit files.
39 * Revision 1.5 1995/12/12 15:57:55 adam
40 * Implemented mf_unlink. cf_unlink uses mf_unlink.
42 * Revision 1.4 1995/12/11 09:03:55 adam
43 * New function: cf_unlink.
44 * New member of commit file head: state (0) deleted, (1) hash file.
46 * Revision 1.3 1995/12/01 16:24:29 adam
47 * Commit files use separate meta file area.
49 * Revision 1.2 1995/12/01 11:37:24 adam
50 * Cached/commit files implemented as meta-files.
52 * Revision 1.1 1995/11/30 08:33:13 adam
53 * Started work on commit facility.
64 #define CF_OPTIMIZE_COMMIT 0
66 void cf_unlink (CFile cf)
68 if (cf->bucket_in_memory)
70 logf (LOG_FATAL, "Cannot unlink potential dirty cache");
75 mf_unlink (cf->block_mf);
76 mf_unlink (cf->hash_mf);
80 #if CF_OPTIMIZE_COMMIT
81 struct map_cache_entity {
90 struct map_cache_entity *map;
95 static struct map_cache *map_cache_init (CFile cf)
97 int mem_max = 2000000;
98 struct map_cache *m_p;
100 m_p = xmalloc (sizeof(*m_p));
102 m_p->max = mem_max / cf->head.block_size;
103 m_p->buf = xmalloc (mem_max);
105 m_p->map = xmalloc (sizeof(*m_p->map) * m_p->max);
109 static int map_cache_cmp_from (const void *p1, const void *p2)
111 return ((struct map_cache_entity*) p1)->from -
112 ((struct map_cache_entity*) p2)->from;
115 static int map_cache_cmp_to (const void *p1, const void *p2)
117 return ((struct map_cache_entity*) p1)->to -
118 ((struct map_cache_entity*) p2)->to;
121 static void map_cache_flush (struct map_cache *m_p)
125 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_from);
126 assert (m_p->no < 2 || m_p->map[0].from < m_p->map[1].from);
127 for (i = 0; i<m_p->no; i++)
129 if (!mf_read (m_p->cf->block_mf, m_p->map[i].from, 0, 0,
130 m_p->buf + i * m_p->cf->head.block_size))
132 logf (LOG_FATAL, "read commit block at position %d",
136 m_p->map[i].from = i;
138 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_to);
139 assert (m_p->no < 2 || m_p->map[0].to < m_p->map[1].to);
140 for (i = 0; i<m_p->no; i++)
142 mf_write (m_p->cf->rmf, m_p->map[i].to, 0, 0,
143 m_p->buf + m_p->map[i].from * m_p->cf->head.block_size);
148 static void map_cache_del (struct map_cache *m_p)
150 map_cache_flush (m_p);
156 static void map_cache_add (struct map_cache *m_p, int from, int to)
160 m_p->map[i].from = from;
164 map_cache_flush (m_p);
167 /* CF_OPTIMIZE_COMMIT */
170 static void cf_commit_hash (CFile cf)
174 struct CFile_ph_bucket *p;
175 #if CF_OPTIMIZE_COMMIT
176 struct map_cache *m_p;
179 #if CF_OPTIMIZE_COMMIT
180 m_p = map_cache_init (cf);
183 p = (struct CFile_ph_bucket *) xmalloc (sizeof(*p));
184 hash_bytes = cf->head.hash_size * sizeof(int);
185 bucket_no = cf->head.first_bucket;
186 for (; bucket_no < cf->head.next_bucket; bucket_no++)
188 if (!mf_read (cf->hash_mf, bucket_no, 0, 0, p))
190 logf (LOG_FATAL, "read commit hash");
193 for (i = 0; i<HASH_BUCKET && p->vno[i]; i++)
195 #if CF_OPTIMIZE_COMMIT
196 map_cache_add (m_p, p->vno[i], p->no[i]);
198 if (!mf_read (cf->block_mf, p->vno[i], 0, 0, cf->iobuf))
200 logf (LOG_FATAL, "read commit block");
203 mf_write (cf->rmf, p->no[i], 0, 0, cf->iobuf);
207 #if CF_OPTIMIZE_COMMIT
213 static void cf_commit_flat (CFile cf)
219 #if CF_OPTIMIZE_COMMIT
220 struct map_cache *m_p;
224 #if CF_OPTIMIZE_COMMIT
225 m_p = map_cache_init (cf);
227 fp = (int *) xmalloc (HASH_BSIZE);
228 for (hno = cf->head.next_bucket; hno < cf->head.flat_bucket; hno++)
230 for (i = 0; i < (int) (HASH_BSIZE/sizeof(int)); i++)
232 if (!mf_read (cf->hash_mf, hno, 0, 0, fp) &&
233 hno != cf->head.flat_bucket-1)
235 logf (LOG_FATAL, "read index block hno=%d (%d-%d) commit",
236 hno, cf->head.next_bucket, cf->head.flat_bucket-1);
238 for (i = 0; i < (int) (HASH_BSIZE/sizeof(int)); i++)
242 #if CF_OPTIMIZE_COMMIT
243 map_cache_add (m_p, fp[i], vno);
245 if (!mf_read (cf->block_mf, fp[i], 0, 0, cf->iobuf))
247 logf (LOG_FATAL, "read data block hno=%d (%d-%d) "
248 "i=%d commit block at %d (->%d)",
249 hno, cf->head.next_bucket, cf->head.flat_bucket-1,
253 mf_write (cf->rmf, vno, 0, 0, cf->iobuf);
260 #if CF_OPTIMIZE_COMMIT
266 void cf_commit (CFile cf)
269 if (cf->bucket_in_memory)
271 logf (LOG_FATAL, "Cannot commit potential dirty cache");
274 if (cf->head.state == 1)
276 else if (cf->head.state == 2)