Text Generation · Transformers · Safetensors · llama · go · text-generation-inference
kenhktsui committed
Commit 38a0dc2 · verified · 1 parent: af4e0cb

Update tokenizer.py

Files changed (1):
  tokenizer.py (+1 / -18)
tokenizer.py CHANGED
@@ -5,7 +5,6 @@ from transformers import PreTrainedTokenizer
 
 
 class AlphabetTokenizer(PreTrainedTokenizer):
-    vocab_files_names = {"vocab_file": "vocab.json"}
     special_tokens_dict = {
         'unk_token': '[UNK]',
         'sep_token': '[SEP]',
@@ -15,7 +14,7 @@ class AlphabetTokenizer(PreTrainedTokenizer):
     }
 
     def __init__(self, **kwargs):
-        self.alphabet = [chr(i) for i in range(65, 65+19)] + [chr(i).lower() for i in range(65, 65+19)] + [str(i) for i in range(0, 10)] + ['.', '+', '-', ' ', 'W']
+        self.alphabet = [chr(i) for i in range(65, 65+19)] + [chr(i).lower() for i in range(65, 65+19)] + [str(i) for i in range(0, 10)] + ['.', '+', '-', ' ', 'W', '>', 'X']
         self.vocab = {char: i for i, char in enumerate(self.alphabet)}
         self.inv_vocab = {i: char for char, i in self.vocab.items()}
 
@@ -78,19 +77,3 @@ class AlphabetTokenizer(PreTrainedTokenizer):
         f.write(json.dumps(self.vocab, ensure_ascii=False))
 
         return (vocab_file,)
-
-    @classmethod
-    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
-        """Load the tokenizer from a pretrained model vocabulary."""
-        tokenizer = cls(*init_inputs, **kwargs)
-        vocab_file = os.path.join(pretrained_model_name_or_path, tokenizer.vocab_files_names["vocab_file"])
-        if os.path.isfile(vocab_file):
-            with open(vocab_file, "r", encoding="utf-8") as f:
-                vocab = json.load(f)
-            tokenizer.vocab = vocab
-            tokenizer.inv_vocab = {v: k for k, v in vocab.items()}
-
-        # override default _add_tokens of special tokens, and we added manually
-        tokenizer._added_tokens_decoder = {}
-        tokenizer.add_special_tokens(cls.special_tokens_dict)
-        return tokenizer
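
For reference, a minimal sketch of the base vocabulary the updated __init__ builds. The character list is taken verbatim from the + line above; the size check and comments are illustrative, not part of the repository:

# Sketch: the base alphabet after this commit, before special tokens are added.
alphabet = (
    [chr(i) for i in range(65, 65 + 19)]            # 'A'..'S' (19 uppercase letters)
    + [chr(i).lower() for i in range(65, 65 + 19)]  # 'a'..'s'
    + [str(i) for i in range(0, 10)]                # '0'..'9'
    + ['.', '+', '-', ' ', 'W', '>', 'X']           # '>' and 'X' are the symbols added by this commit
)
vocab = {char: i for i, char in enumerate(alphabet)}
print(len(vocab))  # 55; 'W', '>', and 'X' fall outside 'A'..'S', so there are no collisions

With the custom from_pretrained override and vocab_files_names removed, loading presumably falls back to the stock PreTrainedTokenizer.from_pretrained machinery; since __init__ rebuilds the alphabet deterministically, the saved vocab.json is no longer needed to reconstruct the mapping at load time.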