ylacombe HF staff Xenova HF staff committed on
Commit
382b89b
1 Parent(s): 3ad57c7

Upload tokenizer.json with huggingface_hub (#2)

Browse files

- Upload tokenizer.json with huggingface_hub (c3e1d1ef9e627d2d3dfa492a932a4130cfb33798)


Co-authored-by: Joshua <[email protected]>

Files changed (1) hide show
  1. tokenizer.json +103 -0
tokenizer.json ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [
6
+ {
7
+ "id": 45,
8
+ "content": "<unk>",
9
+ "single_word": false,
10
+ "lstrip": false,
11
+ "rstrip": false,
12
+ "normalized": false,
13
+ "special": true
14
+ }
15
+ ],
16
+ "normalizer": {
17
+ "type": "Sequence",
18
+ "normalizers": [
19
+ {
20
+ "type": "Lowercase"
21
+ },
22
+ {
23
+ "type": "Replace",
24
+ "pattern": {
25
+ "Regex": "[^7avc\u201405\u00f38pyz4m\u00fcks\u00e1qhn\u00e9_91ft xd\u00edb3jgl2iue\u00fao\u00f1r6]"
26
+ },
27
+ "content": ""
28
+ },
29
+ {
30
+ "type": "Strip",
31
+ "strip_left": true,
32
+ "strip_right": true
33
+ },
34
+ {
35
+ "type": "Replace",
36
+ "pattern": {
37
+ "Regex": "(?=.)|(?<!^)$"
38
+ },
39
+ "content": "7"
40
+ }
41
+ ]
42
+ },
43
+ "pre_tokenizer": {
44
+ "type": "Split",
45
+ "pattern": {
46
+ "Regex": ""
47
+ },
48
+ "behavior": "Isolated",
49
+ "invert": false
50
+ },
51
+ "post_processor": null,
52
+ "decoder": null,
53
+ "model": {
54
+ "vocab": {
55
+ "7": 0,
56
+ "a": 1,
57
+ "v": 2,
58
+ "c": 3,
59
+ "\u2014": 4,
60
+ "0": 5,
61
+ "5": 6,
62
+ "\u00f3": 7,
63
+ "8": 8,
64
+ "p": 9,
65
+ "y": 10,
66
+ "z": 11,
67
+ "4": 12,
68
+ "m": 13,
69
+ "\u00fc": 14,
70
+ "k": 15,
71
+ "s": 16,
72
+ "\u00e1": 17,
73
+ "q": 18,
74
+ "h": 19,
75
+ "n": 20,
76
+ "\u00e9": 21,
77
+ "_": 22,
78
+ "9": 23,
79
+ "1": 24,
80
+ "f": 25,
81
+ "t": 26,
82
+ " ": 27,
83
+ "x": 28,
84
+ "d": 29,
85
+ "\u00ed": 30,
86
+ "b": 31,
87
+ "3": 32,
88
+ "j": 33,
89
+ "g": 34,
90
+ "l": 35,
91
+ "2": 36,
92
+ "i": 37,
93
+ "u": 38,
94
+ "e": 39,
95
+ "\u00fa": 40,
96
+ "o": 41,
97
+ "\u00f1": 42,
98
+ "r": 43,
99
+ "6": 44,
100
+ "<unk>": 45
101
+ }
102
+ }
103
+ }