# -*- coding: utf-8 -*-
# Configuration file of the normaliser. The paths to Moses components and various parameters are set here.
# Absolute path to the directory in which the models should be created
working_dir: <<MODELDIR>>
# Encoding of all the datasets
encoding: utf8
# The character to be used internally for marking word boundaries
# This must be a single character that is different from the space character and does not appear anywhere in the corpus
tokenseparator: ÷
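# A sketch of the internal representation (an assumption about how the character-level
# normaliser uses the separator, not stated in this file): text is rewritten as
# space-separated characters, with the separator replacing the original spaces,
# so with '÷' the phrase "na primer" would internally become something like:
#   n a ÷ p r i m e r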
# Whether the data has to be tokenised, i.e. spaces inserted before punctuation etc.
tokenise: False
# Whether the data should be truecased.
truecase: False
# Location of the dataset on which truecasing should be learnt. If no dataset (or model) is given, truecasing is learnt on both sides of the training data
truecase_dataset: null
# If you already have a truecasing model available, just give its path and that model will be used
truecase_model: null
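# For example, to reuse an existing Moses truecasing model (hypothetical path):
# truecase_model: <<MODELDIR>>/truecase.model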
# Whether the data should be lowercased. If the data is written in non-standard orthography (like Twitter data), this is probably a good idea; note, however, that all normalisation output will be lowercased as well
lowercase: False
# Whether the output should be verticalised and token-aligned
align: False
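# A sketch of what the verticalised output could look like, one original token and
# its normalisation per line (the exact layout is an assumption, not documented here):
#   u       you
#   2moro   tomorrow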
# Training datasets
train_orig: <<TMPDIR>>/train.orig
train_norm: <<TMPDIR>>/train.norm
# Percentage of the training data to be held out as a development set (for tuning the system)
# If you have a development set separate from the training data, define its paths in the dev_orig and dev_norm variables below
dev_perc: 0
dev_orig: <<TMPDIR>>/dev.orig
dev_norm: <<TMPDIR>>/dev.norm
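# For example, to tune on 10% held out from the training data instead of a
# separate dev set, you would set (sketch):
# dev_perc: 10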
# Location of additional datasets for language modelling. The target-side training data is always used (and does not have to be defined here)
# Experiments show that using multiple relevant target-language datasets as language models is the easiest way to improve your results
lms: []
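# For example (hypothetical paths), additional language-model datasets are given as a YAML list:
# lms:
#   - <<TMPDIR>>/tweets.norm
#   - <<TMPDIR>>/news.txt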
# Order of the language model. If you have compiled KenLM to allow orders higher than 6, an order of 10 has been shown to yield the best results
lm_order: 6
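# As a sketch: Moses-bundled KenLM can be compiled for higher orders with a build
# flag along the lines of ./bjam --max-kenlm-order=10 (check your Moses build
# documentation), after which you could set e.g.:
# lm_order: 10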
# Location of the Moses scripts
moses_scripts: <<MOSESDIR>>/scripts/
# Location of the KenLM tool for language modelling (by default installed with Moses)
kenlm: <<MOSESDIR>>/bin/
# Location of the Moses decoder, usually the same as the location of KenLM
moses: <<MOSESDIR>>/bin/
# Location of the mgiza word aligner
mgiza: <<MOSESDIR>>/mgiza/mgizapp/bin/
# Number of CPU cores to use during training and tuning
num_cores: 10