prepare-wmt14en2de.sh

#!/bin/bash
# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh
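#
# Usage: run from a scratch working directory, since the script clones two
# repositories and writes orig/ and wmt14_en_de/ into the current directory.
#   bash prepare-wmt14en2de.sh            # standard WMT'14 en-de preparation
#   bash prepare-wmt14en2de.sh --icml17   # older nc-v9 corpus, matching ConvS2S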

echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git

echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git

SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
BPEROOT=subword-nmt
BPE_TOKENS=40000
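
# Parallel training data (Europarl v7, Common Crawl, News Commentary v12) plus
# the newstest2014 test set. FILES holds the archive names, CORPORA the paths
# of the extracted corpus files (without the language suffix).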
URLS=(
    "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
    "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
    "http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz"
    "http://statmt.org/wmt14/test-full.tgz"
)
FILES=(
    "training-parallel-europarl-v7.tgz"
    "training-parallel-commoncrawl.tgz"
    "training-parallel-nc-v12.tgz"
    "test-full.tgz"
)
CORPORA=(
    "training/europarl-v7.de-en"
    "commoncrawl.de-en"
    "training/news-commentary-v12.de-en"
)

# This makes the dataset compatible with the one used in "Convolutional
# Sequence to Sequence Learning" (https://arxiv.org/abs/1705.03122) by
# swapping in the older news-commentary v9 corpus.
if [ "$1" == "--icml17" ]; then
    URLS[2]="http://statmt.org/wmt14/training-parallel-nc-v9.tgz"
    FILES[2]="training-parallel-nc-v9.tgz"
    CORPORA[2]="training/news-commentary-v9.de-en"
fi
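
# Sanity check: the Moses clone above must have produced the scripts directory.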
if [ ! -d "$SCRIPTS" ]; then
    echo "Please set SCRIPTS variable correctly to point to Moses scripts."
    exit 1
fi
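
# Language pair and directory layout: raw downloads land in orig/, intermediate
# files in wmt14_en_de/tmp/, and the final corpus in wmt14_en_de/.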
src=en
tgt=de
lang=en-de
prep=wmt14_en_de
tmp=$prep/tmp
orig=orig

mkdir -p $orig $tmp $prep

cd $orig
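
# Download each archive unless it is already present, then extract freshly
# downloaded .tgz/.tar files. Note that extraction is skipped entirely when
# the archive already exists on disk.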
for ((i=0;i<${#URLS[@]};++i)); do
    file=${FILES[i]}
    if [ -f $file ]; then
        echo "$file already exists, skipping download"
    else
        url=${URLS[i]}
        wget "$url"
        if [ -f $file ]; then
            echo "$url successfully downloaded."
        else
            echo "$url not successfully downloaded."
            exit 1
        fi
        if [ ${file: -4} == ".tgz" ]; then
            tar zxvf $file
        elif [ ${file: -4} == ".tar" ]; then
            tar xvf $file
        fi
    fi
done
cd ..
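
# Tokenize the training corpora: normalize punctuation, remove non-printing
# characters, then apply the Moses tokenizer (-a enables aggressive hyphen
# splitting), appending all corpora into one file per language.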
  71. echo "pre-processing train data..."
  72. for l in $src $tgt; do
  73. rm $tmp/train.tags.$lang.tok.$l
  74. for f in "${CORPORA[@]}"; do
  75. cat $orig/$f.$l | \
  76. perl $NORM_PUNC $l | \
  77. perl $REM_NON_PRINT_CHAR | \
  78. perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l
  79. done
  80. done
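
# The newstest2014 test set ships as SGML: keep only the <seg> lines, strip
# the tags, normalize curly apostrophes, and tokenize. English is the source
# side, German the reference.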
  81. echo "pre-processing test data..."
  82. for l in $src $tgt; do
  83. if [ "$l" == "$src" ]; then
  84. t="src"
  85. else
  86. t="ref"
  87. fi
  88. grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
  89. sed -e 's/<seg id="[0-9]*">\s*//g' | \
  90. sed -e 's/\s*<\/seg>\s*//g' | \
  91. sed -e "s/\’/\'/g" | \
  92. perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l
  93. echo ""
  94. done
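
# Hold out every 100th sentence pair (~1% of the data) for validation; the
# rest becomes the training set.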
  95. echo "splitting train and valid..."
  96. for l in $src $tgt; do
  97. awk '{if (NR%100 == 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/valid.$l
  98. awk '{if (NR%100 != 0) print $0; }' $tmp/train.tags.$lang.tok.$l > $tmp/train.$l
  99. done
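
# Learn a joint BPE model on the concatenation of both language sides, so
# English and German share one set of $BPE_TOKENS merge operations.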
TRAIN=$tmp/train.de-en
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
    cat $tmp/train.$l >> $TRAIN
done

echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE
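
# Apply the learned BPE codes to every split of both languages.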
for L in $src $tgt; do
    for f in train.$L valid.$L test.$L; do
        echo "apply_bpe.py to ${f}..."
        python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f
    done
done
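
# Filter train/valid with Moses clean-corpus-n.perl: drop sentence pairs with
# a length outside 1-250 tokens or a source/target length ratio above 1.5.
# The test set is copied through unfiltered.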
perl $CLEAN -ratio 1.5 $tmp/bpe.train $src $tgt $prep/train 1 250
perl $CLEAN -ratio 1.5 $tmp/bpe.valid $src $tgt $prep/valid 1 250

for L in $src $tgt; do
    cp $tmp/bpe.test.$L $prep/test.$L
done
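
# The resulting wmt14_en_de/{train,valid,test}.{en,de} files are plain BPE'd
# text. A typical next step (outside this script, and assuming fairseq is
# installed) would be to binarize them for training, e.g.:
#   fairseq-preprocess --source-lang en --target-lang de \
#       --trainpref wmt14_en_de/train --validpref wmt14_en_de/valid \
#       --testpref wmt14_en_de/test --destdir data-bin/wmt14_en_de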