# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for Collaborative Deep Ranking (CDR)"""
import cornac
from cornac.data import Reader
from cornac.datasets import citeulike
from cornac.eval_methods import RatioSplit
from cornac.data import TextModality
from cornac.data.text import BaseTokenizer

# CDR composes an autoencoder with a collaborative ranking model to jointly
# represent item texts and user-item interactions.
# The necessary data can be loaded as follows:
docs, item_ids = citeulike.load_text()
feedback = citeulike.load_feedback(reader=Reader(item_set=item_ids))
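
# docs holds one text document per item and item_ids the matching item IDs;
# feedback is a list of (user, item, rating) triplets. Passing item_set=item_ids
# to the Reader keeps only feedback on items for which text is available.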
# Instantiate a TextModality; it makes it convenient to work with text auxiliary information.
# For more details, please refer to the tutorial on how to work with auxiliary data.
item_text_modality = TextModality(
corpus=docs,
ids=item_ids,
tokenizer=BaseTokenizer(stop_words="english"),
max_vocab=8000,
max_doc_freq=0.5,
)
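
# The modality builds bag-of-words features over the item texts: max_vocab caps
# the vocabulary at 8000 tokens (matching vocab_size passed to CDR below), and
# max_doc_freq=0.5 drops tokens that appear in more than half of the documents.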
# Define an evaluation method to split feedback into train and test sets
ratio_split = RatioSplit(
data=feedback,
test_size=0.2,
exclude_unknowns=True,
item_text=item_text_modality,
verbose=True,
seed=123,
rating_threshold=0.5,
)
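
# With exclude_unknowns=True, test interactions involving users or items unseen
# during training are removed; rating_threshold=0.5 marks interactions rated at
# or above 0.5 as positive, which ranking metrics treat as relevant.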
# Instantiate the CDR model
cdr = cornac.models.CDR(
k=50,
autoencoder_structure=[200],
max_iter=100,
batch_size=128,
lambda_u=0.01,
lambda_v=0.1,
lambda_w=0.0001,
lambda_n=5,
learning_rate=0.001,
vocab_size=8000,
seed=123,
)
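
# A brief note on the hyperparameters (following the CDL/CDR formulation):
# k is the latent dimension, autoencoder_structure gives the hidden layer sizes
# of the denoising autoencoder, and lambda_u/lambda_v/lambda_w/lambda_n weight
# the regularization of user factors, item factors, network weights, and the
# text reconstruction term, respectively.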
# Use Recall@300 for evaluation
rec_300 = cornac.metrics.Recall(k=300)

# Put everything together into an experiment and run it
cornac.Experiment(eval_method=ratio_split, models=[cdr], metrics=[rec_300]).run()
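
# As a follow-up (a minimal sketch, not part of the original example): after the
# experiment, the fitted model can score items directly. Indices refer to
# Cornac's internal training-set mapping, so user index 0 is just an illustration.
user_idx = 0
scores = cdr.score(user_idx)  # predicted score for every known item
print("Highest predicted score for user 0:", scores.max())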