1 /* This file is part of Pazpar2.
2 Copyright (C) Index Data
4 Pazpar2 is free software; you can redistribute it and/or modify it under
5 the terms of the GNU General Public License as published by the Free
6 Software Foundation; either version 2, or (at your option) any later
9 Pazpar2 is distributed in the hope that it will be useful, but WITHOUT ANY
10 WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 #include "relevance.h"
34 #define log2(x) (log(x)/log(2))
// Fields below belong to struct declarations whose heads are not visible in
// this excerpt (presumably struct relevance and struct word_entry — confirm
// against the full file).
39 int *doc_frequency_vec;
40 int *term_frequency_vec_tmp;
43 struct word_entry *entries;
44 pp2_charset_token_t prt;
50 struct norm_client *norm;
55 const char *display_str;
58 struct word_entry *next;
61 // Structure to keep data for norm_client scores from one client
64 int num; // number of the client
68 const char *native_score;
70 float a,b; // Rn = a*R + b
71 struct client *client;
72 struct norm_client *next;
73 struct norm_record *records;
// Special scorefield codes. Negative values select a scoring strategy;
// non-negative values are metadata field indexes (see findnorm below).
76 const int scorefield_none = -1; // Do not normalize anything, use tf/idf as is
77 // This is the old behavior, and the default
78 const int scorefield_internal = -2; // use our tf/idf, but normalize it
79 const int scorefield_position = -3; // fake a score based on the position
80 // Positive numbers indicate the field to be used for scoring.
82 // A structure for each (sub)record. There is one list for each client
// NOTE(review): the struct head and the score member are elided in this
// excerpt; rp->score is used by setup_norm_record/squaresum below.
85 struct record *record;
87 struct record_cluster *clust;
88 struct norm_record *next;
91 // Find the norm_client entry for this client, or create one if not there
// On a miss, allocates a new entry from rel->nmem and initializes its
// scorefield from the client's PZ_NATIVE_SCORE session setting.
// NOTE(review): several original lines are elided in this excerpt (the
// list-walk loop, the early return on a hit, and the code linking the new
// entry into rel->norm), so the control flow shown here is incomplete.
92 struct norm_client *findnorm( struct relevance *rel, struct client* client)
94 struct norm_client *n = rel->norm;
95 struct session_database *sdb;
97 if (n->client == client )
// Not found: allocate a fresh norm_client from the relevance-wide pool
101 n = nmem_malloc(rel->nmem, sizeof(struct norm_client) );
// Number the new entry one above the current list head
103 n->num = rel->norm->num +1;
// Read the client's native-score setting to decide the scoring strategy
112 sdb = client_get_database(client);
113 n->native_score = session_setting_oneval(sdb, PZ_NATIVE_SCORE);
115 n->scorefield = scorefield_none;
116 yaz_log(YLOG_LOG,"Normalizing: Client %d uses '%s'", n->num, n->native_score );
// Map the setting string onto a scorefield code:
//   empty/missing -> scorefield_none (keep plain tf/idf)
//   "position"    -> scorefield_position (score derived from hit position)
//   "internal"    -> scorefield_internal (normalize our own tf/idf)
//   anything else -> resolved as a service metadata field name below
117 if ( ! n->native_score || ! *n->native_score ) // not specified
118 n->scorefield = scorefield_none;
119 else if ( strcmp(n->native_score,"position") == 0 )
120 n->scorefield = scorefield_position;
121 else if ( strcmp(n->native_score,"internal") == 0 )
122 n->scorefield = scorefield_internal;
124 { // Get the field index for the score
125 struct session *se = client_get_session(client);
126 n->scorefield = conf_service_metadata_field_id(se->service, n->native_score);
128 yaz_log(YLOG_LOG,"Normalizing: Client %d uses '%s' = %d",
129 n->num, n->native_score, n->scorefield );
134 // Add all records from a cluster into the list for that client, for normalizing later
// For each record it builds a norm_record carrying a raw score chosen by the
// client's scorefield, pushes it onto the client's list, and tracks the
// client's min/max so normalize_scores can fit a linear mapping later.
// NOTE(review): some original lines are elided here (braces, the line that
// links rp into norm->records, the count increment, and the else-branch
// head before line 157), so the structure shown is incomplete.
135 static void setup_norm_record( struct relevance *rel, struct record_cluster *clust)
137 struct record *record;
138 for (record = clust->records; record; record = record->next)
140 struct norm_client *norm = findnorm(rel, record->client);
141 struct norm_record *rp;
142 if ( norm->scorefield == scorefield_none)
143 break; // not interested in normalizing this client
144 rp = nmem_malloc(rel->nmem, sizeof(struct norm_record) );
// Push onto the head of this client's record list
146 rp->next = norm->records;
// Choose the raw score according to the client's scorefield strategy
150 if ( norm->scorefield == scorefield_position )
151 rp->score = 1.0 / record->position;
152 else if ( norm->scorefield == scorefield_internal )
153 rp->score = clust->relevance_score; // the tf/idf for the whole cluster
154 // TODO - Get them for each record, merge later!
// (presumably an else-branch: take the score from the metadata field)
157 struct record_metadata *md = record->metadata[norm->scorefield];
158 rp->score = md->data.fnumber;
160 yaz_log(YLOG_LOG,"Got score for %d/%d : %f ",
161 norm->num, record->position, rp->score );
162 record -> score = rp->score;
// Maintain min/max over this client's raw scores (first record seeds both)
163 if ( norm->count == 1 )
165 norm->max = rp->score;
166 norm->min = rp->score;
168 if ( rp->score > norm->max )
169 norm->max = rp->score;
170 if ( rp->score < norm->min )
171 norm->min = rp->score;
176 // Calculate the squared sum of residuals, that is the difference from
177 // normalized values to the target curve, which is 1/n
// Used as the cost function by normalize_scores when fitting a*score + b.
// NOTE(review): the accumulator declaration, the sum += diff*diff line and
// the return statement are elided in this excerpt.
178 static double squaresum( struct norm_record *rp, double a, double b)
181 for ( ; rp; rp = rp->next )
// Target curve is 1/position; residual is distance of the mapped score from it
183 double target = 1.0 / rp->record->position;
184 double normscore = rp->score * a + b;
185 double diff = target - normscore;
191 // For each client, normalize scores
// Fits a linear mapping Rn = a*R + b per client by a coordinate-descent
// search minimizing squaresum(), then applies it to every collected record.
// NOTE(review): many original lines are elided in this excerpt (brace
// lines, declarations of chi/it/branch, the initial a assignment, the
// assignments inside each branch of the step search, and the else/brace
// scaffolding), so the structure shown here is incomplete.
192 static void normalize_scores(struct relevance *rel)
194 const int maxiterations = 1000;
195 const double enough = 100.0; // sets the number of decimals we are happy with
196 const double stepchange = 0.5; // reduction of the step size when finding middle
197 // 0.5 sems to be magical, much better than 0.4 or 0.6
198 struct norm_client *norm;
199 for ( norm = rel->norm; norm; norm = norm->next )
201 yaz_log(YLOG_LOG,"Normalizing client %d: scorefield=%d count=%d range=%f %f = %f",
202 norm->num, norm->scorefield, norm->count, norm->min,
203 norm->max, norm->max-norm->min);
204 norm->a = 1.0; // default normalizing factors, no change
// Only fit when there is a real native score to normalize
206 if ( norm->scorefield != scorefield_none &&
207 norm->scorefield != scorefield_position )
208 { // have something to normalize
209 double range = norm->max - norm->min;
211 double a,b; // params to optimize
212 double as,bs; // step sizes
215 // initial guesses for the parameters
216 // Rmax = a * rmax + b # want to be 1.0
217 // Rmin = a * rmin + b # want to be 0.0
218 // Rmax - Rmin = a ( rmax - rmin ) # subtracting equations
219 // 1.0 - 0.0 = a ( rmax - rmin )
221 // Rmin = a * rmin + b
222 // b = Rmin - a * rmin
223 // = 0.0 - 1/range * rmin
// Guard against a degenerate (constant) score range before dividing
226 if ( range < 1e-6 ) // practically zero
229 b = -1.0 * norm->min / range;
230 // b = fabs(norm->min) / range;
233 chi = squaresum( norm->records, a,b);
234 yaz_log(YLOG_LOG,"Initial done: it=%d: a=%f / %f b=%f / %f chi = %f",
235 0, a, as, b, bs, chi );
236 while (it++ < maxiterations) // safeguard against things not converging
// Probe the cost one step away in each direction along a and b
238 double aplus = squaresum(norm->records, a+as, b);
239 double aminus= squaresum(norm->records, a-as, b);
240 double bplus = squaresum(norm->records, a, b+bs);
241 double bminus= squaresum(norm->records, a, b-bs);
242 double prevchi = chi;
// Move in whichever single direction improves chi the most,
// growing that direction's step size to accelerate
243 if ( aplus < chi && aplus < aminus && aplus < bplus && aplus < bminus)
247 as = as * (1.0 + stepchange);
250 else if ( aminus < chi && aminus < aplus && aminus < bplus && aminus < bminus)
254 as = as * (1.0 + stepchange);
257 else if ( bplus < chi && bplus < aplus && bplus < aminus && bplus < bminus)
261 bs = bs * (1.0 + stepchange);
264 else if ( bminus < chi && bminus < aplus && bminus < bplus && bminus < aminus)
269 bs = bs * (1.0+stepchange);
272 { // a,b is the best so far, adjust one step size
273 // which one? The one that has the greatest effect to chi
274 // That is, the average of plus and minus is further away from chi
275 double adif = 0.5 * ( aplus + aminus ) - prevchi;
276 double bdif = 0.5 * ( bplus + bminus ) - prevchi;
277 if ( fabs(adif) > fabs(bdif) )
279 as = as * ( 1.0 - stepchange);
284 bs = bs * ( 1.0 - stepchange);
288 yaz_log(YLOG_LOG,"Fitting %s it=%d: a=%g %g b=%g %g chi=%g ap=%g am=%g, bp=%g bm=%g p=%g",
289 branch, it, a, as, b, bs, chi,
290 aplus, aminus, bplus, bminus, prevchi );
// Converged: step sizes are tiny relative to the parameter values
293 if ( fabs(as) * enough < fabs(a) &&
294 fabs(bs) * enough < fabs(b) ) {
295 break; // not changing much any more
299 yaz_log(YLOG_LOG,"Fitting done: it=%d: a=%g / %g b=%g / %g chi = %g",
300 it-1, a, as, b, bs, chi );
303 if ( norm->scorefield != scorefield_none )
304 { // distribute the normalized scores to the records
305 struct norm_record *nr = norm->records;
306 for ( ; nr ; nr = nr->next ) {
307 double r = nr->score;
308 r = norm->a * r + norm -> b;
// Scale to an integer-friendly range for the cluster score
309 nr->clust->relevance_score = 10000 * r;
310 nr->record->score = r;
311 yaz_log(YLOG_LOG,"Normalized %f * %f + %f = %f",
312 nr->score, norm->a, norm->b, r );
313 // TODO - This keeps overwriting the cluster score in random order!
314 // Need to merge results better
// Look up norm_str among the query's word entries; on a match, parse the
// rank spec into *weight (either a plain integer, or a per-CCL-field
// "<field> <weight>" override matching the entry's ccl_field) and
// presumably return the matching entry.
// NOTE(review): the function's opening brace, the declarations of i/cp/
// no_read, the return statements and closing braces are elided in this
// excerpt.
321 static struct word_entry *word_entry_match(struct relevance *r,
322 const char *norm_str,
323 const char *rank, int *weight)
326 struct word_entry *entries = r->entries;
327 for (; entries; entries = entries->next, i++)
329 if (*norm_str && !strcmp(norm_str, entries->norm_str))
// Default: rank begins with a bare integer weight
333 sscanf(rank, "%d%n", weight, &no_read);
// Optional field-specific override: "<ccl_field> <weight>"
337 if (no_read > 0 && (cp = strchr(rank, ' ')))
339 if ((cp - rank) == strlen(entries->ccl_field) &&
340 memcmp(entries->ccl_field, rank, cp - rank) == 0)
341 *weight = atoi(cp + 1);
// Tokenize a field's words and write them to w_snippet, wrapping tokens
// that match one of the query's word entries in <match>...</match> markup.
// NOTE(review): this excerpt elides the function's braces, the w_snippet
// parameter declaration, the match bookkeeping between lines 367-390 and
// the return value, so the exact matching/markup logic is incomplete here.
349 int relevance_snippet(struct relevance *r,
350 const char *words, const char *name,
354 const char *norm_str;
// Walk the normalized token stream for this field
357 pp2_charset_token_first(r->prt, words, 0);
358 while ((norm_str = pp2_charset_token_next(r->prt)))
360 size_t org_start, org_len;
361 struct word_entry *entries = r->entries;
// Original (pre-normalization) byte range of the current token
364 pp2_get_org(r->prt, &org_start, &org_len);
365 for (; entries; entries = entries->next, i++)
367 if (*norm_str && !strcmp(norm_str, entries->norm_str))
375 wrbuf_puts(w_snippet, "<match>");
384 wrbuf_puts(w_snippet, "</match>");
// Emit the original text of the token, XML-escaped
387 wrbuf_xmlputs_n(w_snippet, words + org_start, org_len);
390 wrbuf_puts(w_snippet, "</match>");
393 yaz_log(YLOG_DEBUG, "SNIPPET match: %s", wrbuf_cstr(w_snippet));
// Count query-term occurrences in one field of a record and fold them into
// the cluster's term-frequency vectors, writing an explanation trace into
// the cluster's relevance_explain1 WRBUF. Weights decay with token position
// (lead_decay) and get a bonus for terms following each other (follow_factor).
// NOTE(review): this excerpt elides braces, the declarations of i/j/length/
// res/name and the field-weight parameter, the loop that clears w[], and
// several closing braces, so the structure shown is incomplete.
398 void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
399 const char *words, const char *rank,
402 int *w = r->term_frequency_vec_tmp;
403 const char *norm_str;
405 double lead_decay = r->lead_decay;
406 struct word_entry *e;
407 WRBUF wr = cluster->relevance_explain1;
408 int printed_about_field = 0;
410 pp2_charset_token_first(r->prt, words, 0);
// (presumably: reset the per-field working vector w[] for each term)
411 for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
418 while ((norm_str = pp2_charset_token_next(r->prt)))
420 int local_weight = 0;
421 e = word_entry_match(r, norm_str, rank, &local_weight);
// Log the field content once per field in the explain trace (truncated to 50)
427 if (!printed_about_field)
429 printed_about_field = 1;
430 wrbuf_printf(wr, "field=%s content=", name);
431 if (strlen(words) > 50)
433 wrbuf_xmlputs_n(wr, words, 49);
434 wrbuf_puts(wr, " ...");
437 wrbuf_xmlputs(wr, words);
438 wrbuf_puts(wr, ";\n");
// Positional decay: later tokens in the field contribute less
440 assert(res < r->vec_len);
441 w[res] += local_weight / (1 + log2(1 + lead_decay * length));
442 wrbuf_printf(wr, "%s: w[%d] += w(%d) / "
443 "(1+log2(1+lead_decay(%f) * length(%d)));\n",
444 e->display_str, res, local_weight, lead_decay, length);
// Proximity bonus: boost when the preceding query term occurred nearby
446 if (j > 0 && r->term_pos[j])
448 int d = length + 1 - r->term_pos[j];
449 wrbuf_printf(wr, "%s: w[%d] += w[%d](%d) * follow(%f) / "
451 e->display_str, res, res, w[res],
452 r->follow_factor, d);
453 w[res] += w[res] * r->follow_factor / (1 + log2(d));
// Remember token positions so the next term can compute its distance
455 for (j = 0; j < r->vec_len; j++)
456 r->term_pos[j] = j < res ? 0 : length + 1;
// Fold the per-field counts into the cluster vectors, optionally divided
// by (a log of) the field length according to r->length_divide
461 for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
463 if (length == 0 || w[i] == 0)
465 wrbuf_printf(wr, "%s: tf[%d] += w[%d](%d)", e->display_str, i, i, w[i]);
466 switch (r->length_divide)
469 cluster->term_frequency_vecf[i] += (double) w[i];
472 wrbuf_printf(wr, " / log2(1+length(%d))", length);
473 cluster->term_frequency_vecf[i] +=
474 (double) w[i] / log2(1 + length);
477 wrbuf_printf(wr, " / length(%d)", length);
478 cluster->term_frequency_vecf[i] += (double) w[i] / length;
480 cluster->term_frequency_vec[i] += w[i];
481 wrbuf_printf(wr, " (%f);\n", cluster->term_frequency_vecf[i]);
// Slot 0 accumulates total length of all fields (see relevance_newrec)
484 cluster->term_frequency_vec[0] += length;
// Recursively walk a CCL query tree and build res->entries: one word_entry
// per normalized token of each term, numbering them via res->vec_len.
// NOTE(review): the excerpt elides the function's braces, the switch on the
// node kind (boolean vs. term), the declarations of words/numwords/i/
// ccl_field, and the advance of e past existing entries, so the structure
// shown is incomplete.
487 static void pull_terms(struct relevance *res, struct ccl_rpn_node *n)
// Boolean node: recurse into both operands
500 pull_terms(res, n->u.p[0]);
501 pull_terms(res, n->u.p[1]);
// Term node: split on spaces, then tokenize each word
504 nmem_strsplit(res->nmem, " ", n->u.t.term, &words, &numwords);
505 for (i = 0; i < numwords; i++)
507 const char *norm_str;
509 ccl_field = nmem_strdup_null(res->nmem, n->u.t.qual);
511 pp2_charset_token_first(res->prt, words[i], 0);
512 while ((norm_str = pp2_charset_token_next(res->prt)))
// Append a new entry at the tail of the list, copied into the nmem pool
514 struct word_entry **e = &res->entries;
517 *e = nmem_malloc(res->nmem, sizeof(**e));
518 (*e)->norm_str = nmem_strdup(res->nmem, norm_str);
519 (*e)->ccl_field = ccl_field;
520 (*e)->termno = res->vec_len++;
521 (*e)->display_str = nmem_strdup(res->nmem, words[i]);
// Reset the document-frequency counters so relevance can be recomputed.
// NOTE(review): the opening brace and the declaration of i are elided in
// this excerpt.
530 void relevance_clear(struct relevance *r)
535 for (i = 0; i < r->vec_len; i++)
536 r->doc_frequency_vec[i] = 0;
// Create a relevance context for a CCL query: allocates the struct and its
// vectors on a private nmem pool, builds the word-entry list from the query
// (pull_terms sets vec_len), and clears the counters.
// NOTE(review): the excerpt elides some parameters (rank_cluster,
// length_divide), the assignments of res->nmem and res->entries/vec_len
// initialization, the term_pos assignment head, and the return statement.
540 struct relevance *relevance_create_ccl(pp2_charset_fact_t pft,
541 struct ccl_rpn_node *query,
543 double follow_factor, double lead_decay,
546 NMEM nmem = nmem_create();
547 struct relevance *res = nmem_malloc(nmem, sizeof(*res));
552 res->rank_cluster = rank_cluster;
553 res->follow_factor = follow_factor;
554 res->lead_decay = lead_decay;
555 res->length_divide = length_divide;
// Tokenizer shared by query parsing, counting and snippets
557 res->prt = pp2_charset_token_create(pft, "relevance");
559 pull_terms(res, query);
// Vectors are sized by the number of terms found by pull_terms
561 res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));
564 res->term_frequency_vec_tmp =
565 nmem_malloc(res->nmem,
566 res->vec_len * sizeof(*res->term_frequency_vec_tmp));
569 nmem_malloc(res->nmem, res->vec_len * sizeof(*res->term_pos));
571 relevance_clear(res);
// Destroy a relevance context: frees the tokenizer, then the whole nmem
// pool (which owns every allocation made by this module).
// NOTE(review): the opening brace, a likely NULL guard on *rp, and the
// *rp = 0 reset are elided in this excerpt.
575 void relevance_destroy(struct relevance **rp)
579 pp2_charset_token_destroy((*rp)->prt);
580 nmem_destroy((*rp)->nmem);
// Merge src's term-frequency vectors into dst (element-wise addition),
// used when two record clusters are merged.
// NOTE(review): the opening brace and the declaration of i are elided in
// this excerpt.
585 void relevance_mergerec(struct relevance *r, struct record_cluster *dst,
586 const struct record_cluster *src)
590 for (i = 0; i < r->vec_len; i++)
591 dst->term_frequency_vec[i] += src->term_frequency_vec[i];
593 for (i = 0; i < r->vec_len; i++)
594 dst->term_frequency_vecf[i] += src->term_frequency_vecf[i];
// Allocate and zero the per-cluster term-frequency vectors for a new
// record cluster.
// NOTE(review): the opening brace, the declaration of i, and the
// allocation-call heads at lines 603/610 are elided in this excerpt.
597 void relevance_newrec(struct relevance *r, struct record_cluster *rec)
601 // term frequency [1,..] . [0] is total length of all fields
602 rec->term_frequency_vec =
604 r->vec_len * sizeof(*rec->term_frequency_vec));
605 for (i = 0; i < r->vec_len; i++)
606 rec->term_frequency_vec[i] = 0;
608 // term frequency divided by length of field [1,...]
609 rec->term_frequency_vecf =
611 r->vec_len * sizeof(*rec->term_frequency_vecf));
612 for (i = 0; i < r->vec_len; i++)
613 rec->term_frequency_vecf[i] = 0.0;
// Finish one cluster: bump the document-frequency counter for every term
// that occurred in it, and the total-document counter in slot 0.
// NOTE(review): the opening brace and the declaration of i are elided in
// this excerpt.
616 void relevance_donerecord(struct relevance *r, struct record_cluster *cluster)
620 for (i = 1; i < r->vec_len; i++)
621 if (cluster->term_frequency_vec[i] > 0)
622 r->doc_frequency_vec[i]++;
624 r->doc_frequency_vec[0]++;
629 // Prepare for a relevance-sorted read
// Computes the idf vector over all clusters seen so far, then walks the
// record list computing a tf*idf score per cluster (with an explain trace
// in relevance_explain2), feeds the per-client normalization structures,
// and finally runs normalize_scores.
// NOTE(review): this excerpt elides braces, the declarations of i/w/
// relevance, the loop head around line 650, the free(idfvec), and the
// advance of e in the scoring loop, so the structure shown is incomplete.
630 void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
633 float *idfvec = xmalloc(rel->vec_len * sizeof(float));
635 reclist_enter(reclist);
637 // Calculate document frequency vector for each term.
638 for (i = 1; i < rel->vec_len; i++)
// Terms that occur in no document get no idf contribution
640 if (!rel->doc_frequency_vec[i])
644 /* add one to nominator idf(t,D) to ensure a value > 0 */
645 idfvec[i] = log((float) (1 + rel->doc_frequency_vec[0]) /
646 rel->doc_frequency_vec[i]);
649 // Calculate relevance for each document (cluster)
654 struct word_entry *e = rel->entries;
655 struct record_cluster *rec = reclist_read_record(reclist);
658 w = rec->relevance_explain2;
660 wrbuf_puts(w, "relevance = 0;\n");
661 for (i = 1; i < rel->vec_len; i++)
663 float termfreq = (float) rec->term_frequency_vecf[i];
// Scale to integer range; accumulated into 'relevance' (elided line)
664 int add = 100000 * termfreq * idfvec[i];
666 wrbuf_printf(w, "idf[%d] = log(((1 + total(%d))/termoccur(%d));\n",
667 i, rel->doc_frequency_vec[0],
668 rel->doc_frequency_vec[i]);
669 wrbuf_printf(w, "%s: relevance += 100000 * tf[%d](%f) * "
670 "idf[%d](%f) (%d);\n",
671 e->display_str, i, termfreq, i, idfvec[i], add);
// Optionally average the score over the cluster's member records
675 if (!rel->rank_cluster)
677 struct record *record;
678 int cluster_size = 0;
680 for (record = rec->records; record; record = record->next)
683 wrbuf_printf(w, "score = relevance(%d)/cluster_size(%d);\n",
684 relevance, cluster_size);
685 relevance /= cluster_size;
689 wrbuf_printf(w, "score = relevance(%d);\n", relevance);
691 rec->relevance_score = relevance;
693 // Build the normalizing structures
694 // List of (sub)records for each target
695 setup_norm_record( rel, rec );
// Fit and apply per-client score normalization over everything collected
699 normalize_scores(rel);
701 // TODO - Calculate the cluster scores from individual records
702 // At the moment the record scoring puts one of them in the cluster...
703 reclist_rewind(reclist);
705 reclist_leave(reclist);
713 * c-file-style: "Stroustrup"
714 * indent-tabs-mode: nil
716 * vim: shiftwidth=4 tabstop=8 expandtab