Text Generation
Transformers
Safetensors
chess_transformer
chess
llm-course
chess-challenge
custom_code
Instructions for using LLM-course/simple_tokenizer with libraries, inference providers, notebooks, and local apps. The sections below show how to get started.
- Libraries
- Transformers
How to use LLM-course/simple_tokenizer with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="LLM-course/simple_tokenizer", trust_remote_code=True)

# Load model directly
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("LLM-course/simple_tokenizer", trust_remote_code=True, dtype="auto")
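Because the model ships a custom tokenizer (the ChessTokenizer shown further down this page), you can also load and inspect it directly. The snippet below is a minimal sketch, assuming the repository registers the custom tokenizer for AutoTokenizer (the custom_code tag suggests it does) and ships a vocab.json; the exact token IDs depend on that vocabulary.

# Minimal sketch: load the custom chess tokenizer and round-trip a short opening.
# Assumes the repo exposes the tokenizer via trust_remote_code and includes vocab.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("LLM-course/simple_tokenizer", trust_remote_code=True)

ids = tokenizer.encode("WPe2e4 BPe7e5")   # token IDs depend on the vocabulary shipped with the repo
moves = tokenizer.decode(ids)             # back to a space-separated move string
print(ids, moves)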
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use LLM-course/simple_tokenizer with vLLM:
Install from pip and serve the model
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "LLM-course/simple_tokenizer"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "LLM-course/simple_tokenizer",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'

Use Docker
docker model run hf.co/LLM-course/simple_tokenizer
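Since the vLLM server exposes an OpenAI-compatible API, you can also call it from Python instead of curl. This is a minimal sketch assuming the server above is running on localhost:8000; the api_key value is a placeholder because a local vLLM server does not require one by default.

# Minimal sketch: query the vLLM server through its OpenAI-compatible endpoint.
# Assumes `vllm serve "LLM-course/simple_tokenizer"` is running on localhost:8000.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")  # placeholder key

completion = client.completions.create(
    model="LLM-course/simple_tokenizer",
    prompt="WPe2e4 BPe7e5",   # a chess opening in the extended UCI format the model was trained on
    max_tokens=32,
    temperature=0.5,
)
print(completion.choices[0].text)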
- SGLang
How to use LLM-course/simple_tokenizer with SGLang:
Install from pip and serve the model
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "LLM-course/simple_tokenizer" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "LLM-course/simple_tokenizer",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'

Use Docker images
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "LLM-course/simple_tokenizer" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "LLM-course/simple_tokenizer",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
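The SGLang server speaks the same OpenAI-compatible completions protocol, so the request can also be made with plain HTTP from Python. This is a minimal sketch assuming the server above is listening on localhost:30000.

# Minimal sketch: call the SGLang server's /v1/completions endpoint with requests.
# Assumes the SGLang server above is running on localhost:30000.
import requests

payload = {
    "model": "LLM-course/simple_tokenizer",
    "prompt": "WPe2e4 BPe7e5",  # extended UCI moves, matching the model's training data
    "max_tokens": 32,
    "temperature": 0.5,
}
response = requests.post("http://localhost:30000/v1/completions", json=payload, timeout=60)
print(response.json()["choices"][0]["text"])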
- Docker Model Runner
How to use LLM-course/simple_tokenizer with Docker Model Runner:
docker model run hf.co/LLM-course/simple_tokenizer
| """ | |
| Custom Chess Tokenizer for the Chess Challenge. | |
| This tokenizer treats each move as a single token using the extended UCI notation | |
| from the Lichess dataset (e.g., WPe2e4, BNg8f6). | |
| The dataset format uses: | |
| - W/B prefix for White/Black | |
| - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King | |
| - Source and destination squares (e.g., e2e4) | |
| - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling | |
| """ | |
from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    Each move is split into a piece token and its source/destination square
    tokens, and each of these tokens is mapped to a unique ID. The vocabulary
    is built from the training dataset (or from the fixed piece/square lists
    below) so that every token encountered during training has an ID.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.tokenize("WPe2e4 BPe7e5")
        ['P', 'e2', 'e4', 'P', 'e7', 'e5']
    """
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        # Initialize special tokens
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Remove any duplicate special-token entries passed through kwargs
        # to avoid "multiple values for keyword" errors when loading from disk.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Create a minimal vocabulary with just special tokens.
            # The full vocabulary should be built from the dataset.
            self._vocab = self._create_default_vocab()

        # Create reverse mapping
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # Call parent init AFTER setting up the vocab
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )
    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just the special tokens.

        For the full vocabulary, use `build_vocab_from_dataset()`.
        This minimal vocab is just a placeholder - you should build from data.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab
    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included
                (only used by the commented-out frequency-based variant below).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        # Frequency-based alternative (kept for reference):
        # from collections import Counter
        #
        # token_counts = Counter()
        # for game in iterator:
        #     moves = game.strip().split()
        #     token_counts.update(moves)
        #
        # # Filter by frequency
        # tokens = [
        #     token for token, count in token_counts.items()
        #     if count >= min_frequency
        # ]
        #
        # # Sort for reproducibility
        # tokens = sorted(tokens)
        # vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        # Build a fixed vocabulary of piece letters and board squares.
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        pieces = ['K', 'Q', 'R', 'B', 'N', 'P']
        squares = [file + rank for file in "abcdefgh" for rank in "12345678"]  # a1 ... h8
        vocab = {token: idx for idx, token in enumerate(special_tokens + pieces + squares)}
        return cls(vocab=vocab)
    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)
    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of tokens.

        Each move is split into its piece letter and its source and destination
        squares; the W/B color prefix and any capture/check suffixes are dropped.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of move tokens.
        """
        regex = r"^[WB]([KQRBNP])([a-h][1-8])([a-h][1-8])"
        tokens = []
        for move in text.strip().split():
            match = re.search(regex, move)
            if match:
                tokens += list(match.groups())
            else:
                # Append the UNK token itself, not its individual characters.
                tokens.append(self.UNK_TOKEN)
        return tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a string."""
        # Filter out special tokens for cleaner output
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)
    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping tokens to their frequencies.
    """
    from collections import Counter

    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)
    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()
    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)
    return dict(token_counts)
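

# --- Illustrative usage sketch --------------------------------------------
# A minimal sketch of how this tokenizer can be used: build the fixed
# piece/square vocabulary, tokenize a short opening, and persist the
# vocabulary. Token IDs depend on the vocabulary actually built, so none
# are hard-coded here.
if __name__ == "__main__":
    tokenizer = ChessTokenizer.build_vocab_from_iterator([])  # fixed vocab; iterator unused in this path

    game = "WPe2e4 BPe7e5 WNg1f3"
    tokens = tokenizer.tokenize(game)            # piece + source + destination tokens per move
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens)
    print(ids)
    print(tokenizer.convert_tokens_to_string(tokens))

    # Persist the vocabulary (vocab.json) and tokenizer config to disk.
    tokenizer.save_pretrained("./chess_tokenizer")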