int vec_len;
struct word_entry *entries;
pp2_charset_token_t prt;
+ int rank_cluster;
NMEM nmem;
};
struct word_entry *next;
};
-int word_entry_match(struct word_entry *entries, const char *norm_str)
+static int word_entry_match(struct word_entry *entries, const char *norm_str,
+ const char *rank, int *mult)
{
for (; entries; entries = entries->next)
{
- if (!strcmp(norm_str, entries->norm_str))
+ if (*norm_str && !strcmp(norm_str, entries->norm_str))
+ {
+ const char *cp = 0;
+ int no_read = 0;
+ sscanf(rank, "%d%n", mult, &no_read);
+ rank += no_read;
+ while (*rank == ' ')
+ rank++;
+ if (no_read > 0 && (cp = strchr(rank, ' ')))
+ {
+ if ((cp - rank) == strlen(entries->ccl_field) &&
+ memcmp(entries->ccl_field, rank, cp - rank) == 0)
+ *mult = atoi(cp + 1);
+ }
return entries->termno;
+ }
}
return 0;
}
void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
- const char *words, int multiplier, const char *name)
+ const char *words, const char *rank,
+ const char *name)
{
int *mult = cluster->term_frequency_vec_tmp;
const char *norm_str;
int i, length = 0;
-
pp2_charset_token_first(r->prt, words, 0);
for (i = 1; i < r->vec_len; i++)
mult[i] = 0;
+ assert(rank);
while ((norm_str = pp2_charset_token_next(r->prt)))
{
- int res = word_entry_match(r->entries, norm_str);
+ int local_mult = 0;
+ int res = word_entry_match(r->entries, norm_str, rank, &local_mult);
if (res)
{
assert(res < r->vec_len);
- mult[res] += multiplier;
+ mult[res] += local_mult;
}
length++;
}
for (i = 0; i < numwords; i++)
{
const char *norm_str;
-
+
ccl_field = nmem_strdup_null(res->nmem, n->u.t.qual);
pp2_charset_token_first(res->prt, words[i], 0);
}
/** \brief allocate and initialize a relevance context from a CCL query
 *
 *  Creates its own NMEM region, which is stored in the context and is
 *  presumably released by the matching destroy function — confirm callers
 *  no longer pass in an NMEM of their own.
 *
 *  \param pft          charset factory used to build the "relevance" tokenizer
 *  \param query        CCL query whose terms are pulled into the term table
 *  \param rank_cluster nonzero to rank whole clusters; stored for scoring
 *  \returns newly allocated relevance context
 */
struct relevance *relevance_create_ccl(pp2_charset_fact_t pft,
                                       struct ccl_rpn_node *query,
                                       int rank_cluster)
{
    NMEM nmem = nmem_create();
    struct relevance *res = nmem_malloc(nmem, sizeof(*res));
    int term;

    res->nmem = nmem;
    res->rank_cluster = rank_cluster;
    res->entries = 0;
    res->vec_len = 1;        /* slot 0 is the total-document counter */
    res->prt = pp2_charset_token_create(pft, "relevance");

    /* populate entries and grow vec_len from the query terms */
    pull_terms(res, query);

    /* nmem_malloc does not zero memory, so clear the counters by hand */
    res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));
    for (term = 0; term < res->vec_len; term++)
        res->doc_frequency_vec[term] = 0;

    return res;
}
if (*rp)
{
pp2_charset_token_destroy((*rp)->prt);
+ nmem_destroy((*rp)->nmem);
*rp = 0;
}
}
r->vec_len * sizeof(*rec->term_frequency_vec));
for (i = 0; i < r->vec_len; i++)
rec->term_frequency_vec[i] = 0;
-
+
// term frequency divided by length of field [1,...]
rec->term_frequency_vecf =
nmem_malloc(r->nmem,
r->vec_len * sizeof(*rec->term_frequency_vecf));
for (i = 0; i < r->vec_len; i++)
rec->term_frequency_vecf[i] = 0.0;
-
+
// for relevance_countwords (so we don't have to xmalloc/xfree)
rec->term_frequency_vec_tmp =
nmem_malloc(r->nmem,
idfvec[i] = 0;
else
{
- // This conditional may be terribly wrong
- // It was there to address the situation where vec[0] == vec[i]
- // which leads to idfvec[i] == 0... not sure about this
- // Traditional TF-IDF may assume that a word that occurs in every
- // record is irrelevant, but this is actually something we will
- // see a lot
- if ((idfvec[i] = log((float) rel->doc_frequency_vec[0] /
- rel->doc_frequency_vec[i])) < 0.0000001)
- idfvec[i] = 1;
+ /* add one to nominator idf(t,D) to ensure a value > 0 */
+ idfvec[i] = log((float) (1 + rel->doc_frequency_vec[0]) /
+ rel->doc_frequency_vec[i]);
}
}
// Calculate relevance for each document
for (t = 1; t < rel->vec_len; t++)
{
float termfreq = (float) rec->term_frequency_vecf[t];
- relevance += 100000 * (termfreq * idfvec[t] + 0.0000005);
+ relevance += 100000 * termfreq * idfvec[t];
+ }
+ if (!rel->rank_cluster)
+ {
+ struct record *record;
+ int cluster_size = 0;
+
+ for (record = rec->records; record; record = record->next)
+ cluster_size++;
+
+ relevance /= cluster_size;
}
rec->relevance_score = relevance;
}