2 * Copyright (C) 1995, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.12 1996-04-24 13:29:16 adam
8 * Work on optimized on commit operation.
10 * Revision 1.11 1996/04/23 12:36:41 adam
11 * Started work on more efficient commit operation.
13 * Revision 1.10 1996/04/18 16:02:56 adam
14 * Changed logging a bit.
15 * Removed warning message when committing flat shadow files.
17 * Revision 1.9 1996/04/12 07:01:57 adam
18 * Yet another bug fix (next_block was initialized to 0; now set to 1).
20 * Revision 1.8 1996/02/07 14:03:49 adam
21 * Work on flat indexed shadow files.
23 * Revision 1.7 1996/02/07 10:08:46 adam
24 * Work on flat shadow (not finished yet).
26 * Revision 1.6 1995/12/15 12:36:53 adam
27 * Moved hash file information to union.
28 * Renamed commit files.
30 * Revision 1.5 1995/12/12 15:57:55 adam
31 * Implemented mf_unlink. cf_unlink uses mf_unlink.
33 * Revision 1.4 1995/12/11 09:03:55 adam
34 * New function: cf_unlink.
35 * New member of commit file head: state (0) deleted, (1) hash file.
37 * Revision 1.3 1995/12/01 16:24:29 adam
38 * Commit files use separate meta file area.
40 * Revision 1.2 1995/12/01 11:37:24 adam
41 * Cached/commit files implemented as meta-files.
43 * Revision 1.1 1995/11/30 08:33:13 adam
44 * Started work on commit facility.
/* Compile-time switch for the batched commit path: when non-zero, the
   map_cache code below sorts pending block copies so both the shadow
   and the real meta-file are accessed in ascending block order.
   Currently disabled (0): commit copies blocks one at a time. */
#define CF_OPTIMIZE_COMMIT 0
/*
 * cf_unlink: remove the meta-files backing a commit (shadow) CFile.
 *
 * Refuses to run while hash buckets are still cached in memory
 * (bucket_in_memory != 0) — unlinking then could drop dirty data.
 * Both the data-block meta-file and the hash-index meta-file are
 * unlinked via mf_unlink.
 *
 * NOTE(review): braces and the fatal-error exit path are not visible
 * in this extraction; the fragment below is not the complete function.
 */
void cf_unlink (CFile cf)
    if (cf->bucket_in_memory)
        logf (LOG_FATAL, "Cannot unlink potential dirty cache");
    mf_unlink (cf->block_mf);
    mf_unlink (cf->hash_mf);
#if CF_OPTIMIZE_COMMIT
/* One pending copy operation: a block to be moved from the shadow
   (commit) file to the real file.  NOTE(review): the member list is
   not visible here; `from`/`to` int members are implied by the
   map_cache_add() signature below — confirm against the full source. */
struct map_cache_entity {
/* NOTE(review): this line is a member of the (not fully visible)
   struct map_cache: the table of pending map_cache_entity records. */
struct map_cache_entity *map;
/*
 * map_cache_init: allocate a map_cache for batching commit copies.
 *
 * Sizes a ~2 MB block buffer (mem_max) and derives the maximum number
 * of cacheable blocks from the CFile's block size; allocates the
 * pending-copy table to match.
 *
 * NOTE(review): initialization of the remaining members (e.g. the
 * entry count and the back-pointer to cf) and the return statement
 * are not visible in this extraction.
 */
static struct map_cache *map_cache_init (CFile cf)
    int mem_max = 2000000;
    struct map_cache *m_p;
    m_p = xmalloc (sizeof(*m_p));
    /* number of whole blocks that fit in the buffer */
    m_p->max = mem_max / cf->head.block_size;
    m_p->buf = xmalloc (mem_max);
    m_p->map = xmalloc (sizeof(*m_p->map) * m_p->max);
100 static int map_cache_cmp_from (const void *p1, const void *p2)
102 return ((struct map_cache_entity*) p1)->from -
103 ((struct map_cache_entity*) p2)->from;
106 static int map_cache_cmp_to (const void *p1, const void *p2)
108 return ((struct map_cache_entity*) p1)->to -
109 ((struct map_cache_entity*) p2)->to;
/*
 * map_cache_flush: apply all pending block copies in two sorted passes.
 *
 * Pass 1: sort pending entries by source block (`from`), read each
 * block from the shadow/commit block file into consecutive slots of
 * the buffer, then record the slot index back into `from` — so reads
 * hit the shadow file in ascending block order.
 * Pass 2: re-sort by destination block (`to`) and write each buffered
 * block to the real meta-file (rmf) in ascending order.
 *
 * The asserts only sanity-check that the first two entries are in
 * strictly ascending order after each sort.
 *
 * NOTE(review): braces, the fatal-error exit paths, and the reset of
 * m_p->no are not visible in this extraction.
 */
static void map_cache_flush (struct map_cache *m_p)
    qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_from);
    assert (m_p->no < 2 || m_p->map[0].from < m_p->map[1].from);
    for (i = 0; i<m_p->no; i++)
        if (!mf_read (m_p->cf->block_mf, m_p->map[i].from, 0, 0,
            m_p->buf + i * m_p->cf->head.block_size))
            logf (LOG_FATAL, "read commit block at position %d",
        /* remember which buffer slot holds this block for pass 2 */
        m_p->map[i].from = i;
    qsort (m_p->map, m_p->no, sizeof(*m_p->map), map_cache_cmp_to);
    assert (m_p->no < 2 || m_p->map[0].to < m_p->map[1].to);
    for (i = 0; i<m_p->no; i++)
        mf_write (m_p->cf->rmf, m_p->map[i].to, 0, 0,
            m_p->buf + m_p->map[i].from * m_p->cf->head.block_size);
/*
 * map_cache_del: flush any pending copies, then release the cache.
 * NOTE(review): the deallocation of map/buf/m_p is not visible in
 * this extraction — presumably xfree calls follow the flush.
 */
static void map_cache_del (struct map_cache *m_p)
    map_cache_flush (m_p);
/*
 * map_cache_add: queue a copy of shadow block `from` to real-file
 * block `to`, flushing when the table fills up.
 * NOTE(review): the bookkeeping that sets `i`, stores `to`, and
 * increments/compares m_p->no against m_p->max is not visible in
 * this extraction.
 */
static void map_cache_add (struct map_cache *m_p, int from, int to)
    m_p->map[i].from = from;
    map_cache_flush (m_p);
158 /* CF_OPTIMIZE_COMMIT */
/*
 * cf_commit_hash: commit a hash-organized shadow file.
 *
 * Walks every hash bucket from first_bucket up to next_bucket; for
 * each occupied slot (vno[i] != 0) copies shadow block vno[i] to
 * real-file block no[i] — either queued through the map_cache
 * (CF_OPTIMIZE_COMMIT) or with a direct mf_read/mf_write pair.
 *
 * NOTE(review): the matching #else/#endif lines, fatal-error exit
 * paths, declarations of i/hash_bytes/bucket_no, the final xfree(p)
 * and map_cache_del are not visible in this extraction; hash_bytes
 * appears computed but unused in the visible lines.
 */
static void cf_commit_hash (CFile cf)
    struct CFile_ph_bucket *p;
#if CF_OPTIMIZE_COMMIT
    struct map_cache *m_p;
#if CF_OPTIMIZE_COMMIT
    m_p = map_cache_init (cf);
    p = xmalloc (sizeof(*p));
    hash_bytes = cf->head.hash_size * sizeof(int);
    bucket_no = cf->head.first_bucket;
    for (; bucket_no < cf->head.next_bucket; bucket_no++)
        if (!mf_read (cf->hash_mf, bucket_no, 0, 0, p))
            logf (LOG_FATAL, "read commit hash");
        /* each bucket holds up to HASH_BUCKET (vno, no) block pairs;
           a zero vno terminates the list */
        for (i = 0; i<HASH_BUCKET && p->vno[i]; i++)
#if CF_OPTIMIZE_COMMIT
            map_cache_add (m_p, p->vno[i], p->no[i]);
            if (!mf_read (cf->block_mf, p->vno[i], 0, 0, cf->iobuf))
                logf (LOG_FATAL, "read commit block");
            mf_write (cf->rmf, p->no[i], 0, 0, cf->iobuf);
#if CF_OPTIMIZE_COMMIT
/*
 * cf_commit_flat: commit a flat-indexed shadow file.
 *
 * Reads index blocks hno in [next_bucket, flat_bucket): each holds
 * HASH_BSIZE/sizeof(int) shadow-block numbers fp[i]; every non-zero
 * entry is copied from shadow block fp[i] to real-file block vno —
 * queued via the map_cache when CF_OPTIMIZE_COMMIT, otherwise a
 * direct mf_read/mf_write pair.  A short read is tolerated only for
 * the last (possibly partial) index block, which is pre-cleared.
 *
 * NOTE(review): the matching #else/#endif lines, the fatal-error exit
 * paths, the declarations of fp/hno/i/vno, the maintenance of vno,
 * and the trailing xfree/map_cache_del are not visible in this
 * extraction.
 */
static void cf_commit_flat (CFile cf)
#if CF_OPTIMIZE_COMMIT
    struct map_cache *m_p;
#if CF_OPTIMIZE_COMMIT
    m_p = map_cache_init (cf);
    fp = xmalloc (HASH_BSIZE);
    for (hno = cf->head.next_bucket; hno < cf->head.flat_bucket; hno++)
        /* last index block may be partial: clear it before reading */
        if (hno == cf->head.flat_bucket-1)
            for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
        if (!mf_read (cf->hash_mf, hno, 0, 0, fp) &&
            hno != cf->head.flat_bucket-1)
            logf (LOG_FATAL, "read index block hno=%d (%d-%d) commit",
                hno, cf->head.next_bucket, cf->head.flat_bucket-1);
        for (i = 0; i < (HASH_BSIZE/sizeof(int)); i++)
#if CF_OPTIMIZE_COMMIT
            map_cache_add (m_p, fp[i], vno);
            if (!mf_read (cf->block_mf, fp[i], 0, 0, cf->iobuf))
                logf (LOG_FATAL, "read data block hno=%d (%d-%d) "
                    "i=%d commit block at %d (->%d)",
                    hno, cf->head.next_bucket, cf->head.flat_bucket-1,
            mf_write (cf->rmf, vno, 0, 0, cf->iobuf);
#if CF_OPTIMIZE_COMMIT
260 void cf_commit (CFile cf)
263 if (cf->bucket_in_memory)
265 logf (LOG_FATAL, "Cannot commit potential dirty cache");
268 if (cf->head.state == 1)
270 else if (cf->head.state == 2)