2 * Copyright (C) 1995-1998, Index Data
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.14 1998-08-07 15:07:16 adam
8 * Fixed bug in cf_commit_flat.
10 * Revision 1.13 1996/10/29 13:56:16 adam
11 * Include of zebrautl.h instead of alexutil.h.
13 * Revision 1.12 1996/04/24 13:29:16 adam
14 * Work on optimized on commit operation.
16 * Revision 1.11 1996/04/23 12:36:41 adam
17 * Started work on more efficient commit operation.
19 * Revision 1.10 1996/04/18 16:02:56 adam
20 * Changed logging a bit.
21 * Removed warning message when committing flat shadow files.
23 * Revision 1.9 1996/04/12 07:01:57 adam
24 * Yet another bug fix (next_block was initialized to 0; now set to 1).
26 * Revision 1.8 1996/02/07 14:03:49 adam
27 * Work on flat indexed shadow files.
29 * Revision 1.7 1996/02/07 10:08:46 adam
30 * Work on flat shadow (not finished yet).
32 * Revision 1.6 1995/12/15 12:36:53 adam
33 * Moved hash file information to union.
34 * Renamed commit files.
36 * Revision 1.5 1995/12/12 15:57:55 adam
37 * Implemented mf_unlink. cf_unlink uses mf_unlink.
39 * Revision 1.4 1995/12/11 09:03:55 adam
40 * New function: cf_unlink.
41 * New member of commit file head: state (0) deleted, (1) hash file.
43 * Revision 1.3 1995/12/01 16:24:29 adam
44 * Commit files use separate meta file area.
46 * Revision 1.2 1995/12/01 11:37:24 adam
47 * Cached/commit files implemented as meta-files.
49 * Revision 1.1 1995/11/30 08:33:13 adam
50 * Started work on commit facility.
61 #define CF_OPTIMIZE_COMMIT 0
63 void cf_unlink (CFile cf)
65 if (cf->bucket_in_memory)
67 logf (LOG_FATAL, "Cannot unlink potential dirty cache");
72 mf_unlink (cf->block_mf);
73 mf_unlink (cf->hash_mf);
77 #if CF_OPTIMIZE_COMMIT
78 struct map_cache_entity {
87 struct map_cache_entity *map;
92 static struct map_cache *map_cache_init (CFile cf)
94 int mem_max = 2000000;
95 struct map_cache *m_p;
97 m_p = xmalloc (sizeof(*m_p));
99 m_p->max = mem_max / cf->head.block_size;
100 m_p->buf = xmalloc (mem_max);
102 m_p->map = xmalloc (sizeof(*m_p->map) * m_p->max);
106 static int map_cache_cmp_from (const void *p1, const void *p2)
108 return ((struct map_cache_entity*) p1)->from -
109 ((struct map_cache_entity*) p2)->from;
112 static int map_cache_cmp_to (const void *p1, const void *p2)
114 return ((struct map_cache_entity*) p1)->to -
115 ((struct map_cache_entity*) p2)->to;
118 static void map_cache_flush (struct map_cache *m_p)
122 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_from);
123 assert (m_p->no < 2 || m_p->map[0].from < m_p->map[1].from);
124 for (i = 0; i<m_p->no; i++)
126 if (!mf_read (m_p->cf->block_mf, m_p->map[i].from, 0, 0,
127 m_p->buf + i * m_p->cf->head.block_size))
129 logf (LOG_FATAL, "read commit block at position %d",
133 m_p->map[i].from = i;
135 qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_to);
136 assert (m_p->no < 2 || m_p->map[0].to < m_p->map[1].to);
137 for (i = 0; i<m_p->no; i++)
139 mf_write (m_p->cf->rmf, m_p->map[i].to, 0, 0,
140 m_p->buf + m_p->map[i].from * m_p->cf->head.block_size);
145 static void map_cache_del (struct map_cache *m_p)
147 map_cache_flush (m_p);
153 static void map_cache_add (struct map_cache *m_p, int from, int to)
157 m_p->map[i].from = from;
161 map_cache_flush (m_p);
164 /* CF_OPTIMIZE_COMMIT */
167 static void cf_commit_hash (CFile cf)
171 struct CFile_ph_bucket *p;
172 #if CF_OPTIMIZE_COMMIT
173 struct map_cache *m_p;
176 #if CF_OPTIMIZE_COMMIT
177 m_p = map_cache_init (cf);
180 p = xmalloc (sizeof(*p));
181 hash_bytes = cf->head.hash_size * sizeof(int);
182 bucket_no = cf->head.first_bucket;
183 for (; bucket_no < cf->head.next_bucket; bucket_no++)
185 if (!mf_read (cf->hash_mf, bucket_no, 0, 0, p))
187 logf (LOG_FATAL, "read commit hash");
190 for (i = 0; i<HASH_BUCKET && p->vno[i]; i++)
192 #if CF_OPTIMIZE_COMMIT
193 map_cache_add (m_p, p->vno[i], p->no[i]);
195 if (!mf_read (cf->block_mf, p->vno[i], 0, 0, cf->iobuf))
197 logf (LOG_FATAL, "read commit block");
200 mf_write (cf->rmf, p->no[i], 0, 0, cf->iobuf);
204 #if CF_OPTIMIZE_COMMIT
210 static void cf_commit_flat (CFile cf)
216 #if CF_OPTIMIZE_COMMIT
217 struct map_cache *m_p;
221 #if CF_OPTIMIZE_COMMIT
222 m_p = map_cache_init (cf);
224 fp = xmalloc (HASH_BSIZE);
225 for (hno = cf->head.next_bucket; hno < cf->head.flat_bucket; hno++)
227 for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
229 if (!mf_read (cf->hash_mf, hno, 0, 0, fp) &&
230 hno != cf->head.flat_bucket-1)
232 logf (LOG_FATAL, "read index block hno=%d (%d-%d) commit",
233 hno, cf->head.next_bucket, cf->head.flat_bucket-1);
235 for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
239 #if CF_OPTIMIZE_COMMIT
240 map_cache_add (m_p, fp[i], vno);
242 if (!mf_read (cf->block_mf, fp[i], 0, 0, cf->iobuf))
244 logf (LOG_FATAL, "read data block hno=%d (%d-%d) "
245 "i=%d commit block at %d (->%d)",
246 hno, cf->head.next_bucket, cf->head.flat_bucket-1,
250 mf_write (cf->rmf, vno, 0, 0, cf->iobuf);
257 #if CF_OPTIMIZE_COMMIT
263 void cf_commit (CFile cf)
266 if (cf->bucket_in_memory)
268 logf (LOG_FATAL, "Cannot commit potential dirty cache");
271 if (cf->head.state == 1)
273 else if (cf->head.state == 2)