diff --git a/.github/workflows/quality.yml b/.github/workflows/tests.yml
similarity index 74%
rename from .github/workflows/quality.yml
rename to .github/workflows/tests.yml
index 908377273..1d2885ab5 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/tests.yml
@@ -1,4 +1,4 @@
-name: Quality
+name: Tests
on:
push:
@@ -11,8 +11,8 @@ on:
jobs:
- check_code_quality:
- name: Check code quality
+ tests:
+ name: Run tests and quality checks
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -24,8 +24,11 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install ".[quality]"
+ python -m pip install ".[quality,tests]"
- name: Code quality
run: |
make quality
+ - name: Run tests
+ run: |
+ make test
diff --git a/.gitignore b/.gitignore
index d44c47f64..fe9af19a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -175,4 +175,7 @@ data/
wandb/
logs/
eval_results/
-results/
\ No newline at end of file
+results/
+
+.vscode/
+.python-version
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 200b01f34..112528a19 100644
--- a/Makefile
+++ b/Makefile
@@ -3,17 +3,32 @@
# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src
-check_dirs := src
+check_dirs := src tests
+
+
+# dev dependencies
+install:
+ uv venv openr1 --python 3.11
+ . openr1/bin/activate && uv pip install --upgrade pip && \
+ uv pip install vllm==0.8.5.post1 && \
+ uv pip install setuptools && \
+ uv pip install flash-attn --no-build-isolation && \
+ GIT_LFS_SKIP_SMUDGE=1 uv pip install -e ".[dev]"
style:
- black --line-length 119 --target-version py310 $(check_dirs) setup.py
+ ruff format --line-length 119 --target-version py310 $(check_dirs) setup.py
isort $(check_dirs) setup.py
quality:
- black --check --line-length 119 --target-version py310 $(check_dirs) setup.py
+ ruff check --line-length 119 --target-version py310 $(check_dirs) setup.py
isort --check-only $(check_dirs) setup.py
flake8 --max-line-length 119 $(check_dirs) setup.py
+test:
+ pytest -sv --ignore=tests/slow/ tests/
+
+slow_test:
+ pytest -sv -vv tests/slow/
# Evaluation
@@ -26,17 +41,13 @@ evaluate:
fi \
),))
$(if $(filter tensor,$(PARALLEL)),export VLLM_WORKER_MULTIPROC_METHOD=spawn &&,) \
- MODEL_ARGS="pretrained=$(MODEL),dtype=float16,$(PARALLEL_ARGS),max_model_length=32768,gpu_memory_utilisation=0.8" && \
- lighteval vllm $$MODEL_ARGS "custom|$(TASK)|0|0" \
- --custom-tasks src/open_r1/evaluate.py \
- --use-chat-template \
- --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
- --output-dir data/evals/$(MODEL)
-
-# Example usage:
-# Single GPU:
-# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24
-# Data parallel:
-# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8
-# Tensor parallel:
-# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8
+ MODEL_ARGS="pretrained=$(MODEL),dtype=bfloat16,$(PARALLEL_ARGS),max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}" && \
+ if [ "$(TASK)" = "lcb" ]; then \
+ lighteval vllm $$MODEL_ARGS "extended|lcb:codegeneration|0|0" \
+ --use-chat-template \
+ --output-dir data/evals/$(MODEL); \
+ else \
+ lighteval vllm $$MODEL_ARGS "lighteval|$(TASK)|0|0" \
+ --use-chat-template \
+ --output-dir data/evals/$(MODEL); \
+ fi
diff --git a/README.md b/README.md
index 43deda9e4..adc049bfd 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
- [SFT](#sft)
- [GRPO](#grpo)
5. [Evaluating models](#evaluating-models)
-6. [Reproducing Deepseek's evaluation results on MATH-500](#reproducing-deepseeks-evaluation-results-on-math-500)
+6. [Reproducing Deepseek's evaluation results](#reproducing-deepseeks-evaluation-results)
7. [Data generation](#data-generation)
- [Generate data from a smol distilled R1 model](#generate-data-from-a-smol-distilled-r1-model)
- [Generate data from DeepSeek-R1](#generate-data-from-deepseek-r1)
@@ -21,10 +21,9 @@
The goal of this repo is to build the missing pieces of the R1 pipeline such that everybody can reproduce and build on top of it. The project is simple by design and mostly consists of:
-- `src/open_r1`: contains the scripts to train and evaluate models as well as generate synthetic data:
+- `src/open_r1`: contains the scripts to train models as well as generate synthetic data:
- `grpo.py`: trains a model with GRPO on a given dataset.
- `sft.py`: performs a simple SFT of a model on a dataset.
- - `evaluate.py`: evaluates a model on the R1 benchmarks.
- `generate.py`: generates synthetic data from a model using [Distilabel](https://github.com/argilla-io/distilabel).
- `Makefile`: contains easy-to-run commands for each step in the R1 pipeline leveraging the scripts above.
@@ -40,33 +39,44 @@ We will use the DeepSeek-R1 [tech report](https://github.com/deepseek-ai/DeepSee
+## News 🗞️
+
+* **🧑🍳 [2025/05/26] (Step 1 completed!)** We release [**Mixture-of-Thoughts**](https://huggingface.co/datasets/open-r1/Mixture-of-Thoughts)--a curated reasoning dataset of 350k verified traces distilled from R1. The dataset spans tasks in mathematics, coding, and science, and is designed to teach language models to reason step-by-step. We also provide a recipe to train [OpenR1-Distill-7B](https://huggingface.co/open-r1/OpenR1-Distill-7B), which replicates the reasoning capabilities of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) and marks the completion of step 1 in the Open R1 project.
+* **⚡️ [2025/03/11] [(update #3)](https://huggingface.co/blog/open-r1/update-3):** We release the [**CodeForces-CoTs**](https://huggingface.co/datasets/open-r1/codeforces-cots) dataset of 10k competitive programming problems and 100k solutions distilled from R1. We also release IOI24: a new benchmark of _very_ hard problems from international olympiads. A 7B Qwen model trained on CodeForces-CoTs can outperform Claude 3.7 Sonnet on IOI24, while a 32B model can outperform R1 itself.
+* **∞ [2025/02/10] [(update #2)](https://huggingface.co/blog/open-r1/update-2):** We release the [**OpenR1-Math-220k**](https://huggingface.co/datasets/open-r1/OpenR1-Math-220k) dataset of 220k traces distilled from R1 on a new version of NuminaMath. Models trained on this dataset match the performance of DeepSeek's distilled ones.
+* **🔥 [2025/02/02] [(update #1)](https://huggingface.co/blog/open-r1/update-1):** We implement the first parts of the [training](https://github.com/huggingface/open-r1?tab=readme-ov-file#training-models), [inference](https://github.com/huggingface/open-r1?tab=readme-ov-file#data-generation), and [evaluation](https://github.com/huggingface/open-r1?tab=readme-ov-file#reproducing-deepseeks-evaluation-results) pipelines. Let's go!
## Installation
-**Note: Libraries rely on CUDA 12.1. Double check your system if you get segmentation faults.**
+> [!CAUTION]
+> Libraries rely on CUDA 12.4. If you see errors related to segmentation faults, double check the version your system is running with `nvcc --version`.
To run the code in this project, first, create a Python virtual environment using e.g. `uv`.
To install `uv`, follow the [UV Installation Guide](https://docs.astral.sh/uv/getting-started/installation/).
+> [!NOTE]
+> As a shortcut, run `make install` to set up the development libraries (spelled out below). Afterwards, if everything is set up correctly, you can try out the Open-R1 models.
+
+
```shell
uv venv openr1 --python 3.11 && source openr1/bin/activate && uv pip install --upgrade pip
```
-Next, install vLLM:
+> [!TIP]
+> For Hugging Face cluster users, add `export UV_LINK_MODE=copy` to your `.bashrc` to suppress cache warnings from `uv`.
-```shell
-uv pip install vllm>=0.7.0
+Next, install vLLM and FlashAttention:
-# For CUDA 12.1
-pip install vllm>=0.7.0 --extra-index-url https://download.pytorch.org/whl/cu121
-export LD_LIBRARY_PATH=$(python -c "import site; print(site.getsitepackages()[0] + '/nvidia/nvjitlink/lib')"):$LD_LIBRARY_PATH
+```shell
+uv pip install vllm==0.8.5.post1
+uv pip install setuptools && uv pip install flash-attn --no-build-isolation
```
-This will also install PyTorch `v2.5.1` and it is **very important** to use this version since the vLLM binaries are compiled for it. You can then install the remaining dependencies for your specific use case via `pip install -e .[LIST OF MODES]`. For most contributors, we recommend:
+This will also install PyTorch `v2.6.0` and it is **very important** to use this version since the vLLM binaries are compiled for it. You can then install the remaining dependencies for your specific use case via `pip install -e .[LIST OF MODES]`. For most contributors, we recommend:
```shell
-pip install -e ".[dev]"
+GIT_LFS_SKIP_SMUDGE=1 uv pip install -e ".[dev]"
```
Next, log into your Hugging Face and Weights and Biases accounts as follows:
@@ -90,83 +100,378 @@ sudo apt-get install git-lfs
## Training models
-We support training models with either DDP or DeepSpeed (ZeRO-2 and ZeRO-3). To switch between methods, simply change the path to the `accelerate` YAML config in `configs`.
-
> [!NOTE]
> The training commands below are configured for a node of 8 x H100s (80GB). For different hardware and topologies, you may need to tune the batch size and number of gradient accumulation steps.
-### SFT
+We support training models with either DDP or DeepSpeed (ZeRO-2 and ZeRO-3). For example, to perform SFT on a dataset distilled from DeepSeek-R1 with reasoning traces such as [open-r1/Mixture-of-Thoughts](https://huggingface.co/datasets/open-r1/Mixture-of-Thoughts), run:
+
+```shell
+# Train via command line
+accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --model_name_or_path open-r1/Qwen2.5-Math-7B-RoPE-300k \
+ --dataset_name open-r1/Mixture-of-Thoughts \
+ --dataset_config all \
+ --eos_token '<|im_end|>' \
+ --learning_rate 4.0e-5 \
+ --num_train_epochs 5 \
+ --max_seq_length 32768 \
+ --per_device_train_batch_size 2 \
+ --gradient_checkpointing \
+ --bf16 \
+ --use_liger_kernel \
+ --output_dir data/OpenR1-Distill-7B
+
+# Train via YAML config
+accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml
+```
+
+Currently, the following tasks are supported:
-To run SFT on a dataset distilled from DeepSeek-R1 with reasoning traces such as [Bespoke-Stratos-17k](https://huggingface.co/datasets/bespokelabs/Bespoke-Stratos-17k), run:
+* Supervised Fine-Tuning `sft`
+* Group Relative Policy Optimization `grpo`
+
+> [!TIP]
+> If you scale up/down the number of GPUs, we recommend also scaling up the per-device batch size or number of gradient accumulation steps to keep the global batch size constant.
+
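+For reference, the global batch size works out to `num_gpus × per_device_train_batch_size × gradient_accumulation_steps`. A quick sanity check (illustrative numbers only, not a prescribed recipe):
+
+```python
+# Global batch size = #GPUs x per-device batch size x gradient accumulation steps.
+def global_batch_size(num_gpus: int, per_device_bs: int, grad_accum: int) -> int:
+    return num_gpus * per_device_bs * grad_accum
+
+# Halving the GPU count while doubling gradient accumulation keeps it constant.
+assert global_batch_size(8, 2, 8) == global_batch_size(4, 2, 16) == 128
+```
+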
+By default, these scripts will push each model to your Hugging Face Hub username, i.e. `{username}/{model_name}-{task}`. You can override the parameters in each YAML config by appending them to the command as follows:
```shell
-accelerate launch --config_file=configs/zero3.yaml src/open_r1/sft.py \
- --model_name_or_path Qwen/Qwen2.5-Math-1.5B-Instruct \
- --dataset_name HuggingFaceH4/Bespoke-Stratos-17k \
- --learning_rate 2.0e-5 \
+# Change the base model to a smaller variant
+accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml \
+ --model_name_or_path Qwen/Qwen3-0.6B-Base \
+ --hub_model_id OpenR1-Distill-0.6B \
+ --output_dir data/OpenR1-Distill-0.6B
+```
+
+If you also wish to override the Weights and Biases default settings, you can do so as follows:
+
+```shell
+accelerate launch --config_file recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+    --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml \
+ --wandb_entity huggingface --wandb_project open-r1 --run_name Qwen2.5-1.5B-GRPO
+```
+
+**🚨 WARNING 🚨**
+
+Most base models like `meta-llama/Llama-3.2-1B` do not have a chat template, so we set ChatML as the default during training. However, for Qwen base models like `Qwen/Qwen2.5-1.5B`, a chat template is pre-defined in the tokenizer, so the EOS token must be set accordingly, e.g.
+
+```diff
+# Align EOS token with chat template for Qwen base models
+accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --model_name_or_path Qwen/Qwen2.5-1.5B \
++ --eos_token '<|im_end|>'
+ --dataset_name open-r1/Mixture-of-Thoughts \
+ --dataset_config all \
+ --learning_rate 4.0e-5 \
--num_train_epochs 1 \
- --packing \
- --max_seq_length 4096 \
- --per_device_train_batch_size 4 \
- --per_device_eval_batch_size 4 \
- --gradient_accumulation_steps 4 \
+ --max_seq_length 32768 \
+ --per_device_train_batch_size 16 \
--gradient_checkpointing \
--bf16 \
- --logging_steps 5 \
- --eval_strategy steps \
- --eval_steps 100 \
+ --use_liger_kernel \
--output_dir data/Qwen2.5-1.5B-Open-R1-Distill
```
-To launch a Slurm job, run:
+If you wish to use a custom chat template (e.g. Llama or Gemma), then the chat template and associated EOS token must be provided:
+
+```diff
+# Align EOS token with custom chat template
+accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --model_name_or_path meta-llama/Llama-3.2-1B \
++ --chat_template "$(cat llama_chat_template.jinja)" \
++ --eos_token '<|eot_id|>' \
+ --dataset_name open-r1/Mixture-of-Thoughts \
+ --dataset_config all \
+ --learning_rate 4.0e-5 \
+ --num_train_epochs 1 \
+ --max_seq_length 32768 \
+ --per_device_train_batch_size 16 \
+ --gradient_checkpointing \
+ --bf16 \
+ --use_liger_kernel \
+ --output_dir data/Llama-3.2-1B-Open-R1-Distill
+```
+
+### SFT distillation
+
+We provide a recipe to reproduce the reasoning capabilities of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B), starting from the same base model. To do so, run:
```shell
-sbatch --output=/path/to/logs/%x-%j.out --err=/path/to/logs/%x-%j.err slurm/sft.slurm {model} {dataset} {accelerator}
+ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \
+ src/open_r1/sft.py \
+ --config recipes/OpenR1-Distill-7B/sft/config_distill.yaml
```
-Here `{model}` and `{dataset}` refer to the model and dataset IDs on the Hugging Face Hub, while `{accelerator}` refers to the choice of an 🤗 Accelerate config file in configs.
+The result will be a model like [open-r1/OpenR1-Distill-7B](https://huggingface.co/open-r1/OpenR1-Distill-7B), with the following downstream performance:
+
+| Model | AIME 2024 | MATH-500 | GPQA Diamond | LiveCodeBench v5 |
+|-----------------------------|-----------|----------|--------------|------------------|
+| OpenR1-Distill-7B | 52.7 | 89.0 | 52.8 | 39.4 |
+| DeepSeek-R1-Distill-Qwen-7B | 51.3 | 93.5 | 52.4 | 37.4 |
+
+You can adjust the YAML config to train on a different base model or dataset.
### GRPO
-To train via the GRPO trainer we will use the strategy of using one node to run vLLM for faster generation and the remaining nodes for training. Thus we will use the `configs/zero3.yaml` config and then overwrite the `num_processes=7` for the 8 GPU training scenario. Thus all we need to do is:
+We use TRL's [vLLM backend](https://huggingface.co/docs/trl/speeding_up_training?vllm+examples=GRPO#vllm-for-fast-generation-in-online-methods) to scale training to large models across multiple nodes. For single-node training of smol models across 8 GPUs, use `vllm_mode="colocate"` to run vLLM in the same process as the training script:
```shell
-accelerate launch --config_file configs/zero3.yaml --num_processes=7 src/open_r1/grpo.py \
- --output_dir DeepSeek-R1-Distill-Qwen-7B-GRPO \
- --model_name_or_path deepseek-ai/DeepSeek-R1-Distill-Qwen-7B \
- --dataset_name AI-MO/NuminaMath-TIR \
- --max_prompt_length 512 \
- --max_completion_length 1024 \
- --per_device_train_batch_size 1 \
- --gradient_accumulation_steps 16 \
- --logging_steps 10 \
- --bf16 \
- --use_vllm \
- --vllm_device auto \
- --vllm_gpu_memory_utilization 0.7
+ACCELERATE_LOG_LEVEL=info \
+ accelerate launch --config_file recipes/accelerate_configs/zero3.yaml \
+ src/open_r1/grpo.py --config recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml \
+ --vllm_mode colocate
+```
+
+> [!WARNING]
+> The chat template used in the distilled DeepSeek models omits the contents of the reasoning block within the `<think>` and `</think>` tags. It also prefills the assistant response with `<think>`, which interferes with the format reward function. To handle that, it is important to override the chat template as done in e.g. [recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml](./recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml).
+
+For multi-node training on N+1 nodes, with 1 node running the vLLM server and N nodes running training, we provide an example Slurm script. For instance, to run the above recipe on 1+1 nodes with data parallelism, run:
+
+```shell
+sbatch --nodes=2 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task grpo --config demo --accelerator zero2 --dp 8 --tp 1
+```
+
+See the [Launching jobs on a Slurm cluster](#launching-jobs-on-a-slurm-cluster) section for more details.
+
+#### GRPO dataset filtering
+
+We provide support for filtering datasets by generating completions and computing the pass rate on verifiable tasks; see this [README](scripts/pass_rate_filtering/README.md) for details.
+
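+The idea, roughly: sample several completions per problem, score them with a verifier, and keep only the problems whose pass rate falls in a useful band. A minimal sketch with hypothetical `generate` and `verify` helpers (the actual implementation lives in `scripts/pass_rate_filtering`):
+
+```python
+# Hypothetical helpers: `generate(problem, k)` returns k completions and
+# `verify(problem, completion)` returns True if a completion is correct.
+def pass_rate(problem, k: int = 8) -> float:
+    completions = generate(problem, k)
+    return sum(verify(problem, c) for c in completions) / k
+
+# Keep problems that are neither trivially easy nor unsolvable for the policy.
+filtered = [p for p in problems if 0.1 <= pass_rate(p) <= 0.9]
+```
+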
+#### 👨💻 Training with a code interpreter
+
+We provide a `code` reward function for executing code generated by the policy during training. Currently, this reward function targets code contests like [Codeforces](https://codeforces.com), where solutions are executed against a set of test cases and the overall success rate is returned as the final reward. To ensure safe execution, we support multiple sandbox providers:
+
+1. [E2B](https://e2b.dev) - Fast, cloud-based sandboxes with focus on Python execution
+2. [Morph](https://cloud.morph.so/web/) - Cloud-based sandboxes with broader language support - Python/JS/C++/Rust
+
+To use the code reward function, first install the necessary dependencies:
+
+```shell
+uv pip install -e '.[code]'
+```
+
+##### E2B Provider
+
+To use E2B sandboxes, create a `.env` file and add your E2B API token:
+
+```
+E2B_API_KEY="e2b_xxx"
+```
+
+##### Morph Provider
+
+To use Morph, first install the morphcloud package:
+
+```shell
+pip install morphcloud
+```
+
+Then add your Morph API token to the `.env` file:
+
+```
+MORPH_API_KEY="YOUR_MORPH_API_KEY"
+```
+
+To specify which provider to use, add the `provider_type` parameter in your configuration:
+
+```yaml
+# For E2B
+provider_type: e2b
+
+# For Morph
+provider_type: morph
+```
+
+##### Dataset Requirements
+
+Make sure your dataset contains a `verification_info` column with the following schema (adopted from PrimeIntellect's excellent [datasets](https://huggingface.co/collections/PrimeIntellect/synthetic-1-67a2c399cfdd6c9f7fae0c37) of verifiable problems):
+
+```python
+{
+ "language": "python", # Morph supports more languages including C++, Java, etc.
+ "test_cases": [
+ {
+ "input": "4\n4\n0001\n1000\n0011\n0111\n3\n010\n101\n0\n2\n00000\n00001\n4\n01\n001\n0001\n00001\n",
+ "output": "1\n3 \n-1\n0\n\n2\n1 2 \n",
+ "type": "stdin_stdout",
+ }
+ ],
+}
+```
+
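+To make the reward concrete, here is a simplified local illustration of checking one `stdin_stdout` test case (the real reward runs the code inside the E2B or Morph sandboxes rather than a local subprocess):
+
+```python
+import subprocess
+
+def run_test_case(code: str, test_case: dict, timeout: float = 10.0) -> bool:
+    """Run `code` with the test input on stdin and compare stdout, ignoring extra whitespace."""
+    result = subprocess.run(
+        ["python", "-c", code],
+        input=test_case["input"],
+        capture_output=True,
+        text=True,
+        timeout=timeout,
+    )
+    return result.stdout.split() == test_case["output"].split()
+
+# The final reward is the fraction of test cases that pass.
+```
+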
+For example, to train a smol model on Python problems, start the vLLM server:
+
+```shell
+CUDA_VISIBLE_DEVICES=0 trl vllm-serve --model Qwen/Qwen2.5-1.5B-Instruct
+```
+
+Then run training with:
+
+```shell
+CUDA_VISIBLE_DEVICES=1,2,3,4,5,6,7 ACCELERATE_LOG_LEVEL=info \
+ accelerate launch --config_file recipes/accelerate_configs/zero2.yaml --num_processes=7 \
+ src/open_r1/grpo.py --config recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code.yaml
+```
+
+##### Using Router Services
+
+You may get rate limited if too many scripts are executed concurrently on the sandbox services. For both providers, we offer router scripts that can be launched on a CPU node:
+
+For E2B:
+```shell
+sbatch slurm/e2b_router.slurm
+```
+
+For Morph:
+```shell
+sbatch slurm/morph_router.slurm
+```
+
+Then add the router URL in your training YAML config:
+```yaml
+# For E2B
+e2b_router_url: 1.2.3.4:8000
+
+# For Morph
+morph_router_url: 1.2.3.4:8000
```
-To launch a Slurm job, run:
+The port should match the one used when launching the router.
+All training jobs can share the same router IP, which ensures parallel executions are properly managed.
+
+#### Competitive Programming problems: IOI & CodeForces
+
+We provide `ioi_code_reward` and `cf_code_reward` reward functions for executing solutions to problems from [IOI](https://hf.co/datasets/open-r1/ioi) and [CodeForces](https://huggingface.co/datasets/open-r1/codeforces), respectively. You can use either [piston](https://github.com/engineer-man/piston) or Morph (currently IOI only) as your execution provider.
+
+##### Piston
+
+To use Piston:
+1. Get piston workers running, see [slurm/piston/README.md](./slurm/piston/README.md)
+2. Set your environment variable `PISTON_ENDPOINTS` to `slurm` or to a list of piston worker endpoints
+
+For IOI:
+
+3. In your configuration, use `ioi_provider: "piston"`
+
+For CodeForces:
+
+3. Download the generated (hard) test cases:
+```
+# change PATH_TO_SAVE_TESTCASES. Increase --max-workers according to your machine's capacity
+huggingface-cli download open-r1/codeforces --repo-type=dataset --include='generated_tests/*.parquet' --max-workers=8 --local-dir PATH_TO_SAVE_TESTCASES
+```
+4. Save the path in .env:
+```
+CF_TESTS_FOLDER=PATH_TO_SAVE_TESTCASES
+```
+
+##### Morph
+
+Morph is a cloud-based solution that provides sandboxed environments for running code. To use it:
+1. Install the Morph client: `pip install morphcloud`
+2. Add your Morph API key to the `.env` file: `MORPH_API_KEY="your_key_here"`
+3. In your configuration, use `ioi_provider: "morph"`
+
+##### Example recipes
+For IOI:
+
+See the [example recipe](./recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml) for how to use the IOI reward function:
```shell
-sbatch --output=/path/to/logs/%x-%j.out --err=/path/to/logs/%x-%j.err slurm/grpo.slurm {model} {dataset} {accelerator}
+ACCELERATE_LOG_LEVEL=info accelerate launch --config_file recipes/accelerate_configs/zero2.yaml \
+ --num_processes=7 src/open_r1/grpo.py \
+ --config recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml
```
+For CodeForces:
+
+```shell
+sbatch --job-name=cf-grpo --nodes=2 slurm/train.slurm --model Qwen2.5-Coder-7B-Instruct --task grpo --config codeforces --accelerator zero3 --dp 8 --tp 1
+```
+
+### Launching jobs on a Slurm cluster
+
+If you have access to a Slurm cluster, we provide a `slurm/train.slurm` script that will automatically queue training jobs for you. Here's how you can use it:
+
+```shell
+sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm --model {model_name} --task {task} --config {config_suffix} --accelerator {accelerator}
+```
+
+Here `{model_name}` and `{task}` are defined as above, while `{config_suffix}` refers to the specific config and `{accelerator}` refers to the choice of 🤗 Accelerate config in `recipes/accelerate_configs`. If you wish to override the default config parameters, you can provide them by appending a space-separated string like `'--arg1=value1 --arg2=value2'`. Here's a concrete example to run SFT on 1 node of 8 GPUs:
+
+```shell
+sbatch --job-name=open_r1 --nodes=1 slurm/train.slurm --model OpenR1-Distill-7B --task sft --config distill --accelerator zero3
+```
+
+You can scale the number of nodes by increasing the `--nodes` flag.
+
+For GRPO, we use 1 node for the vLLM server and N nodes for training. For example, to run GRPO on 1+1 nodes with mixed data and tensor parallelism, run:
+
+```shell
+sbatch --job-name=open_r1 --nodes=2 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task grpo --config demo --accelerator zero2 --dp 4 --tp 2
+```
+
+> [!NOTE]
+> The configuration in `slurm/train.slurm` is optimised for the Hugging Face Compute Cluster and may require tweaking to be adapted to your own compute nodes.
+
+### Customising the dataset mixture
+
+To combine multiple datasets as a single training mixture, you can specify the `dataset_mixture` parameter in the YAML config file. Here's a template for how to do this:
+
+```yaml
+dataset_mixture:
+ datasets: # List of datasets to include in the mixture
+ - id: dataset_1 # Hub dataset ID
+ config: config_name_1 # Name of the dataset config
+ split: split_1 # Split to use from the dataset
+ columns: # Columns to keep
+ - column_1
+ - column_2
+ weight: 0.25 # Fraction of dataset to use
+ - id: dataset_2
+ config: config_name_2
+ split: split_2
+ columns:
+ - column_1
+ - column_2
+ weight: 0.5
+ seed: 42 # Seed for shuffling the combined dataset
+ test_split_size: 0.1 # Fraction of mixture to use for a test split
+```
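+
+For intuition, the mixture roughly amounts to loading each dataset, keeping the requested columns, subsampling by `weight`, and concatenating. A minimal sketch with the `datasets` library (illustrative only; the training scripts handle this for you):
+
+```python
+from datasets import concatenate_datasets, load_dataset
+
+def build_mixture(datasets_cfg: list[dict], seed: int = 42, test_split_size: float = 0.1):
+    parts = []
+    for d in datasets_cfg:
+        ds = load_dataset(d["id"], d.get("config"), split=d["split"])
+        ds = ds.select_columns(d["columns"])
+        # `weight` is the fraction of the (shuffled) dataset to keep
+        ds = ds.shuffle(seed=seed).select(range(int(len(ds) * d["weight"])))
+        parts.append(ds)
+    mixture = concatenate_datasets(parts).shuffle(seed=seed)
+    return mixture.train_test_split(test_size=test_split_size, seed=seed)
+```
+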
## Evaluating models
-We use `lighteval` to evaluate models, with custom tasks defined in `src/open_r1/evaluate.py`. For models which fit on a single GPU, run:
+We use `lighteval` to evaluate models. For models which fit on a single GPU, run:
```shell
+export VLLM_WORKER_MULTIPROC_METHOD=spawn # Required for vLLM
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
-MODEL_ARGS="pretrained=$MODEL,dtype=float16,max_model_length=32768,gpu_memory_utilisation=0.8"
-TASK=aime24
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
OUTPUT_DIR=data/evals/$MODEL
-lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
- --custom-tasks src/open_r1/evaluate.py \
+# AIME 2024
+TASK=aime24
+lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+
+# MATH-500
+TASK=math_500
+lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+
+# GPQA Diamond
+TASK=gpqa:diamond
+lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+
+# LiveCodeBench
+lighteval vllm $MODEL_ARGS "extended|lcb:codegeneration|0|0" \
--use-chat-template \
- --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
--output-dir $OUTPUT_DIR
```
@@ -175,15 +480,13 @@ To increase throughput across multiple GPUs, use _data parallel_ as follows:
```shell
NUM_GPUS=8
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
-MODEL_ARGS="pretrained=$MODEL,dtype=float16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
TASK=aime24
OUTPUT_DIR=data/evals/$MODEL
-lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
- --custom-tasks src/open_r1/evaluate.py \
+lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \
--use-chat-template \
- --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
- --output-dir $OUTPUT_DIR
+ --output-dir $OUTPUT_DIR
```
For large models which require sharding across GPUs, use _tensor parallel_ and run:
@@ -191,58 +494,175 @@ For large models which require sharding across GPUs, use _tensor parallel_ and r
```shell
NUM_GPUS=8
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
-MODEL_ARGS="pretrained=$MODEL,dtype=float16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
TASK=aime24
OUTPUT_DIR=data/evals/$MODEL
export VLLM_WORKER_MULTIPROC_METHOD=spawn
-lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
- --custom-tasks src/open_r1/evaluate.py \
+lighteval vllm $MODEL_ARGS "lighteval|$TASK|0|0" \
--use-chat-template \
- --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
- --output-dir $OUTPUT_DIR
+ --output-dir $OUTPUT_DIR
```
You can also launch an evaluation with `make evaluate`, specifying the model, task, and optionally the parallelism technique and number of GPUs.
To evaluate on a single GPU:
+
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24
```
To use Data Parallelism:
+
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8
```
To use Tensor Parallelism:
+
```shell
make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8
```
-## Reproducing Deepseek's evaluation results on MATH-500
-We are able to reproduce Deepseek's reported results on the MATH-500 Benchmark:
-| Model | MATH-500 (HF lighteval) | MATH-500 (DeepSeek Reported) |
-| :-------------------------- | :-------: | :----------------------------: |
-| DeepSeek-R1-Distill-Qwen-1.5B | 81.6 | 83.9 |
-| DeepSeek-R1-Distill-Qwen-7B | 91.8 | 92.8 |
-| DeepSeek-R1-Distill-Qwen-14B | 94.2 | 93.9 |
-| DeepSeek-R1-Distill-Qwen-32B | 95.0 | 94.3 |
-| DeepSeek-R1-Distill-Llama-8B | 85.8 | 89.1 |
-| DeepSeek-R1-Distill-Llama-70B | 93.4 | 94.5 |
+
+## Reproducing Deepseek's evaluation results
+
+The DeepSeek-R1 paper uses sampling with 4-64 responses per query to estimate `pass@1` accuracy, but does not specify the exact number of responses per benchmark. In the tables below, we estimate `pass@1` accuracy with the following number of responses per query:
+
+| Benchmark | Number of responses per query |
+|:-------------:|:-----------------------------:|
+| AIME 2024 | 64 |
+| MATH-500 | 4 |
+| GPQA Diamond | 8 |
+| LiveCodeBench | 16 |
+
+Note that for benchmarks like AIME 2024, it is important to sample many responses: there are only 30 problems, so estimates can show high variance across repeated runs. The choice of how many responses to sample per prompt likely explains the small differences between our evaluation results and those reported by DeepSeek.
+
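+Concretely, we estimate `pass@1` as the average correctness over the sampled responses, averaged across problems. A minimal sketch of the estimator (illustrative, not the `lighteval` implementation):
+
+```python
+import numpy as np
+
+def estimate_pass_at_1(correct: np.ndarray) -> float:
+    """`correct` has shape (num_problems, num_responses) with 0/1 entries."""
+    per_problem = correct.mean(axis=1)  # pass@1 estimate for each problem
+    return float(per_problem.mean())    # benchmark-level pass@1
+```
+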
+### AIME 2024
+
+We are able to reproduce Deepseek's reported results on the AIME 2024 benchmark within ~1-3 standard deviations:
+
+| Model | AIME 2024 (🤗 LightEval) | AIME 2024 (DeepSeek Reported) |
+|:------------------------------|:------------------------:|:-----------------------------:|
+| DeepSeek-R1-Distill-Qwen-1.5B | 30.7 | 28.9 |
+| DeepSeek-R1-Distill-Qwen-7B | 50.8 | 55.5 |
+| DeepSeek-R1-Distill-Qwen-14B | 65.9 | 69.7 |
+| DeepSeek-R1-Distill-Qwen-32B | 69.7 | 72.6 |
+| DeepSeek-R1-Distill-Llama-8B | 43.9 | 41.7 |
+| DeepSeek-R1-Distill-Llama-70B | 63.0 | 70.0 |
To reproduce these results use the following command:
+
```shell
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B math_500
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Qwen-7B math_500
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Qwen-14B math_500
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Qwen-32B math_500 tp
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Llama-8B math_500
-sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Llama-70B math_500 tp
+NUM_GPUS=1 # Set to 8 for 32B and 70B models
+MODEL=deepseek-ai/{model_name}
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+OUTPUT_DIR=data/evals/$MODEL
+
+lighteval vllm $MODEL_ARGS "lighteval|aime24|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
```
+Alternatively, you can launch Slurm jobs as follows:
+```shell
+python scripts/run_benchmarks.py --model-id {model_id} --benchmarks aime24
+```
+
+### MATH-500
+
+We are able to reproduce Deepseek's reported results on the MATH-500 benchmark within ~1-3 standard deviations:
+
+| Model | MATH-500 (🤗 LightEval) | MATH-500 (DeepSeek Reported) |
+|:------------------------------|:-----------------------:|:----------------------------:|
+| DeepSeek-R1-Distill-Qwen-1.5B | 83.1 | 83.9 |
+| DeepSeek-R1-Distill-Qwen-7B | 94.5 | 92.8 |
+| DeepSeek-R1-Distill-Qwen-14B | 94.1 | 93.9 |
+| DeepSeek-R1-Distill-Qwen-32B | 95.6 | 94.3 |
+| DeepSeek-R1-Distill-Llama-8B | 88.6 | 89.1 |
+| DeepSeek-R1-Distill-Llama-70B | 95.1 | 94.5 |
+
+To reproduce these results use the following command:
+
+```shell
+export VLLM_WORKER_MULTIPROC_METHOD=spawn
+NUM_GPUS=1 # Set to 8 for 32B and 70B models
+MODEL=deepseek-ai/{model_name}
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+OUTPUT_DIR=data/evals/$MODEL
+
+lighteval vllm $MODEL_ARGS "lighteval|math_500|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+```
+
+Alternatively, you can launch Slurm jobs as follows:
+
+```shell
+python scripts/run_benchmarks.py --model-id {model_id} --benchmarks math_500
+```
+
+### GPQA Diamond
+
+We are able to reproduce Deepseek's reported results on the GPQA Diamond benchmark within ~1-3 standard deviations:
+
+| Model | GPQA Diamond (🤗 LightEval) | GPQA Diamond (DeepSeek Reported) |
+|:------------------------------|:---------------------------:|:--------------------------------:|
+| DeepSeek-R1-Distill-Qwen-1.5B | 35.8 | 33.8 |
+| DeepSeek-R1-Distill-Qwen-7B | 50.5 | 49.1 |
+| DeepSeek-R1-Distill-Qwen-14B | 61.5 | 59.1 |
+| DeepSeek-R1-Distill-Qwen-32B | 63.1 | 62.1 |
+| DeepSeek-R1-Distill-Llama-8B | 46.7 | 49.0 |
+| DeepSeek-R1-Distill-Llama-70B | 67.4 | 65.2 |
+
+To reproduce these results use the following command:
+
+```shell
+export VLLM_WORKER_MULTIPROC_METHOD=spawn
+NUM_GPUS=1 # Set to 8 for 32B and 70B models
+MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+OUTPUT_DIR=data/evals/$MODEL
+
+lighteval vllm $MODEL_ARGS "lighteval|gpqa:diamond|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+```
+
+Alternatively, you can launch Slurm jobs as follows:
+
+```shell
+python scripts/run_benchmarks.py --model-id {model_id} --benchmarks gpqa
+```
+
+### LiveCodeBench
+
+We are able to reproduce Deepseek's reported results on the LiveCodeBench code generation benchmark within ~1-3 standard deviations:
+
+| Model | LiveCodeBench (🤗 LightEval) | LiveCodeBench (DeepSeek Reported) |
+|:------------------------------|:----------------------------:|:---------------------------------:|
+| DeepSeek-R1-Distill-Qwen-1.5B | 16.1 | 16.9 |
+| DeepSeek-R1-Distill-Qwen-7B | 37.4 | 37.6 |
+| DeepSeek-R1-Distill-Qwen-14B | 51.3 | 53.1 |
+| DeepSeek-R1-Distill-Qwen-32B | 56.0 | 57.2 |
+| DeepSeek-R1-Distill-Llama-8B | 37.4 | 39.6 |
+| DeepSeek-R1-Distill-Llama-70B | 55.9 | 57.5 |
+
+To reproduce these results use the following command:
+
+```shell
+NUM_GPUS=1 # Set to 8 for 32B and 70B models, or data_parallel_size=8 with the smaller models for speed
+MODEL=deepseek-ai/{model_name}
+MODEL_ARGS="model_name=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,data_parallel_size=$NUM_GPUS,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+OUTPUT_DIR=data/evals/$MODEL
+
+lighteval vllm $MODEL_ARGS "extended|lcb:codegeneration|0|0" \
+ --use-chat-template \
+ --output-dir $OUTPUT_DIR
+```
+
+Alternatively, you can launch Slurm jobs as follows:
+
+```shell
+python scripts/run_benchmarks.py --model-id {model_id} --benchmarks lcb
+```
## Data generation
@@ -331,6 +751,56 @@ sbatch slurm/generate.slurm \
> [!NOTE]
> While the job is running, you can setup an SSH tunnel through the cluster login node to access the Ray dashboard from your computer running `ssh -L 8265:ray_ip_head_node:8265 `, then browsing `http://localhost:8265`
+
+### Data decontamination
+
+Following [s1: Simple test-time scaling](https://huggingface.co/papers/2501.19393), the data can be decontaminated using the script at [scripts/decontaminate.py](./scripts/decontaminate.py), which decontaminates a dataset using 8-grams and deduplicates the data. Sample run:
+
+```shell
+python scripts/decontaminate.py \
+ --dataset "open-r1/verifiable-coding-problems-python" \
+ --problem_column problem \
+ --cleanup
+```
+
+It will decontaminate against the benchmark datasets and remove the contaminated samples afterwards. If no `--new_dataset_name` argument is provided, the same dataset name will be reused with a `_decontaminated` suffix appended. It runs against the prompt, which for this dataset is the column `problem`, but a different one can be provided.
+
+Arguments for the script:
+
+```shell
+usage: decontaminate.py [-h] --dataset DATASET [--split SPLIT] [--ngram_size NGRAM_SIZE] [--problem_column PROBLEM_COLUMN] [--cleanup] [--new_dataset_name NEW_DATASET_NAME]
+
+options:
+ -h, --help show this help message and exit
+ --dataset DATASET Name of the dataset to check for contamination.
+ --split SPLIT Split to check for contamination, defaults to `train`.
+ --ngram_size NGRAM_SIZE
+ Size of n-grams to build, defaults to 8.
+ --problem_column PROBLEM_COLUMN
+ Name of the column containing the problem (prompt).
+ --cleanup Whether to remove the contaminated rows before pushing the dataset.
+ --new_dataset_name NEW_DATASET_NAME
+ New name for the dataset. If not provided, will reuse the name and add a `_decontaminated` to the name.
+```
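+
+For intuition, the core check boils down to flagging any sample that shares an 8-gram with a benchmark prompt. A simplified sketch of the idea, assuming `benchmark_prompts` and `train_rows` are already loaded (the real script also handles normalisation and pushes the cleaned dataset to the Hub):
+
+```python
+def ngrams(text: str, n: int = 8) -> set[tuple[str, ...]]:
+    tokens = text.lower().split()
+    return {tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)}
+
+benchmark_ngrams = set().union(*(ngrams(p) for p in benchmark_prompts))
+contaminated = [bool(ngrams(row["problem"]) & benchmark_ngrams) for row in train_rows]
+```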
+
## Contributing
Contributions are welcome. Please refer to https://github.com/huggingface/open-r1/issues/23.
+
+## Acknowledgements
+
+This project is built with the collective efforts of many groups and individuals in the open AI community. We are especially grateful to the vLLM and SGLang teams for creating high-performance tooling to scale the rollouts of GRPO. We also thank the teams at [OpenThoughts](https://www.open-thoughts.ai), [Prime Intellect](https://www.primeintellect.ai), and [General Reasoning](https://gr.inc) for creating and sharing high-quality datasets for reasoning.
+
+## Citation
+
+If you find this project useful in your own work, please consider citing it as follows:
+
+```
+@misc{openr1,
+ title = {Open R1: A fully open reproduction of DeepSeek-R1},
+ url = {https://github.com/huggingface/open-r1},
+ author = {{Hugging Face}},
+ month = {January},
+ year = {2025}
+}
+```
diff --git a/logs/.gitkeep b/logs/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml b/recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml
new file mode 100644
index 000000000..639389cc2
--- /dev/null
+++ b/recipes/DeepSeek-R1-Distill-Qwen-1.5B/grpo/config_demo.yaml
@@ -0,0 +1,58 @@
+# Model arguments
+model_name_or_path: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+# We edit the DeepSeek chat template to ensure (a) the reasoning block within <think> and </think> is included in the completion and (b) the <think> tag is not part of the prefill so that the format reward works
+chat_template: "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}"
+dataset_name: open-r1/OpenR1-Math-220k
+dataset_prompt_column: problem
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>"
+
+# GRPO trainer config
+bf16: true
+use_vllm: true
+do_eval: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: DeepSeek-R1-Distill-Qwen-1.5B-GRPO
+hub_strategy: every_save
+learning_rate: 1.0e-06
+log_completions: true
+log_level: info
+logging_first_step: true
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+max_prompt_length: 512
+max_completion_length: 2048
+max_steps: -1
+num_generations: 16
+num_train_epochs: 1
+output_dir: data/DeepSeek-R1-Distill-Qwen-1.5B-GRPO
+overwrite_output_dir: true
+per_device_eval_batch_size: 16
+per_device_train_batch_size: 16
+push_to_hub: true
+report_to:
+- wandb
+reward_funcs:
+- accuracy
+- format
+- tag_count
+reward_weights:
+- 1.0
+- 1.0
+- 1.0
+save_strategy: "epoch"
+save_total_limit: 1
+seed: 42
+temperature: 0.7
+use_liger_kernel: true
+warmup_ratio: 0.1
diff --git a/recipes/OlympicCoder-32B/sft/config_v00.00.yaml b/recipes/OlympicCoder-32B/sft/config_v00.00.yaml
new file mode 100644
index 000000000..754b78f6d
--- /dev/null
+++ b/recipes/OlympicCoder-32B/sft/config_v00.00.yaml
@@ -0,0 +1,49 @@
+# Config for 16 nodes of 8 H100s with FSDP1
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-Coder-32B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+dataset_name: open-r1/codeforces-cots
+dataset_config: solutions_decontaminated
+dataset_num_proc: 12
+
+# SFT trainer config
+bf16: true
+do_eval: false
+eval_strategy: 'no'
+gradient_accumulation_steps: 1
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_always_push: true
+hub_model_id: OlympicCoder-32B
+hub_strategy: every_save
+learning_rate: 4.0e-05
+log_level: info
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+packing: false
+max_grad_norm: 0.2
+max_length: 22528 # we were unable to train at 32k due to OOM. See https://github.com/huggingface/transformers/issues/35983 for context parallelism support.
+max_steps: -1
+num_train_epochs: 10
+optim: paged_adamw_8bit
+output_dir: data/OlympicCoder-32B
+overwrite_output_dir: true
+per_device_eval_batch_size: 1
+per_device_train_batch_size: 1
+push_to_hub: true
+report_to:
+- wandb
+save_only_model: true # needed to bypass FSDP errors with saving paged optimizers
+save_strategy: epoch
+save_total_limit: 1
+seed: 42
+use_liger_kernel: false # fails on multi-node
+warmup_ratio: 0.03
\ No newline at end of file
diff --git a/recipes/OlympicCoder-7B/sft/config_v00.00.yaml b/recipes/OlympicCoder-7B/sft/config_v00.00.yaml
new file mode 100644
index 000000000..dd0be5d96
--- /dev/null
+++ b/recipes/OlympicCoder-7B/sft/config_v00.00.yaml
@@ -0,0 +1,46 @@
+# Config for 1 node of 8 H100s with DeepSpeed ZeRO-3
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-Coder-7B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+dataset_name: open-r1/codeforces-cots
+dataset_config: solutions_decontaminated
+dataset_num_proc: 48
+
+# SFT trainer config
+bf16: true
+do_eval: false
+eval_strategy: 'no'
+gradient_accumulation_steps: 8
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: open-r1/OlympicCoder-7B
+hub_strategy: every_save
+learning_rate: 1.0e-05
+log_level: info
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+packing: false
+max_grad_norm: 0.2
+max_length: 32768
+max_steps: -1
+num_train_epochs: 10
+output_dir: data/OlympicCoder-7B
+overwrite_output_dir: true
+per_device_eval_batch_size: 1
+per_device_train_batch_size: 2
+push_to_hub: true
+report_to:
+- wandb
+save_strategy: epoch
+save_total_limit: 1
+seed: 42
+use_liger_kernel: true
+warmup_ratio: 0.03
\ No newline at end of file
diff --git a/recipes/OpenR1-Distill-7B/sft/config_distill.yaml b/recipes/OpenR1-Distill-7B/sft/config_distill.yaml
new file mode 100644
index 000000000..44d9c09f6
--- /dev/null
+++ b/recipes/OpenR1-Distill-7B/sft/config_distill.yaml
@@ -0,0 +1,48 @@
+# Config for 1 node of 8 x H100s (80GB)
+# Model arguments
+model_name_or_path: open-r1/Qwen2.5-Math-7B-RoPE-300k
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+chat_template: "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Open-R1, a language model trained by Hugging Face to help users. Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: Thought section Solution section. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within XML tags:\\n\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n\\n\\nFor each function call, return a json object with function name and arguments within XML tags:\\n\\n{\\\"name\\\": , \\\"arguments\\\": }\\n<|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Open-R1, a language model trained by Hugging Face to help users. Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: Thought section Solution section. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. 
Now, try to solve the following question through the above guidelines.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n\\n' }}\n {{- message.content }}\n {{- '\\n' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"
+dataset_name: open-r1/Mixture-of-Thoughts
+dataset_config: all
+dataset_num_proc: 12
+eos_token: <|im_end|>
+
+# SFT trainer config
+bf16: true
+do_eval: false
+eval_strategy: 'no'
+gradient_accumulation_steps: 8
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: OpenR1-Distill-7B
+hub_strategy: every_save
+learning_rate: 4.0e-05
+log_level: info
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+packing: false
+max_grad_norm: 0.2
+max_length: 32768
+max_steps: -1
+num_train_epochs: 5
+output_dir: data/OpenR1-Distill-7B
+overwrite_output_dir: true
+per_device_eval_batch_size: 1
+per_device_train_batch_size: 2
+push_to_hub: true
+report_to:
+- wandb
+save_strategy: epoch
+save_total_limit: 1
+seed: 42
+use_liger_kernel: true
+warmup_ratio: 0.03
\ No newline at end of file
diff --git a/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml
new file mode 100644
index 000000000..d1a2a6bce
--- /dev/null
+++ b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml
@@ -0,0 +1,52 @@
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+dataset_name: open-r1/OpenR1-Math-220k
+dataset_prompt_column: problem
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>"
+
+# GRPO trainer config
+bf16: true
+use_vllm: true
+do_eval: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: Qwen2.5-1.5B-Open-R1-GRPO
+hub_strategy: every_save
+learning_rate: 2.0e-05
+log_completions: true
+log_level: info
+logging_first_step: true
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine
+max_prompt_length: 512
+max_completion_length: 1024
+max_steps: -1
+num_generations: 16
+num_train_epochs: 1
+output_dir: data/Qwen2.5-1.5B-Open-R1-GRPO
+overwrite_output_dir: true
+per_device_eval_batch_size: 16
+per_device_train_batch_size: 16
+push_to_hub: true
+report_to:
+- wandb
+reward_funcs:
+- accuracy
+- format
+- tag_count
+reward_weights:
+- 1.0
+- 1.0
+- 1.0
+save_strategy: "epoch"
+save_total_limit: 1
+seed: 42
+warmup_ratio: 0.1
diff --git a/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code.yaml b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code.yaml
new file mode 100644
index 000000000..1c694b1cc
--- /dev/null
+++ b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code.yaml
@@ -0,0 +1,54 @@
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+dataset_name: open-r1/verifiable-coding-problems-python
+dataset_prompt_column: problem_statement
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>"
+
+# GRPO trainer config
+beta: 0.01
+bf16: true
+use_vllm: true
+do_eval: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: Qwen2.5-1.5B-Open-R1-Code-GRPO
+hub_strategy: every_save
+learning_rate: 5.0e-06
+log_completions: true
+log_level: info
+logging_first_step: true
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+max_prompt_length: 1024
+max_completion_length: 2048
+max_steps: 500
+num_generations: 14
+num_train_epochs: 1
+output_dir: data/Qwen2.5-1.5B-Open-R1-Code-GRPO
+overwrite_output_dir: true
+per_device_train_batch_size: 16
+push_to_hub: true
+report_to:
+- wandb
+reward_funcs:
+- code
+- format
+reward_weights:
+- 1.0
+- 0.1
+save_strategy: "steps"
+save_steps: 50
+save_total_limit: 1
+seed: 42
+temperature: 1.0
+warmup_ratio: 0.03
\ No newline at end of file
diff --git a/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml
new file mode 100644
index 000000000..7ec23c6f1
--- /dev/null
+++ b/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo_code_ioi.yaml
@@ -0,0 +1,61 @@
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+dataset_name: open-r1/ioi
+dataset_prompt_column: problem
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>"
+
+# GRPO trainer config
+beta: 0.01
+bf16: true
+use_vllm: true
+do_eval: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: Qwen2.5-1.5B-Open-R1-Code-GRPO
+hub_strategy: every_save
+learning_rate: 5.0e-06
+log_completions: true
+log_level: info
+logging_first_step: true
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: cosine_with_min_lr
+lr_scheduler_kwargs:
+ min_lr_rate: 0.1
+max_prompt_length: 1024
+max_completion_length: 2048
+max_steps: 500
+num_generations: 14
+num_train_epochs: 1
+output_dir: data/Qwen2.5-1.5B-Open-R1-Code-GRPO
+overwrite_output_dir: true
+per_device_train_batch_size: 16
+push_to_hub: true
+report_to:
+- wandb
+save_strategy: "steps"
+save_steps: 50
+save_total_limit: 1
+seed: 42
+temperature: 1.0
+warmup_ratio: 0.03
+# ioi specific config
+code_language: cpp
+reward_funcs:
+- ioi_code
+- code_format
+- format
+reward_weights:
+- 1.0
+- 0.1
+- 0.1
+# For each generation, evaluate this many test cases in parallel, then check if any of them failed (0 score): if so, stop evaluating;
+# otherwise continue with the next batch of test cases. Useful to avoid overloading the eval server and to save time on wrong solutions.
+code_eval_test_batch_size: 3
\ No newline at end of file
diff --git a/recipes/Qwen2.5-Coder-7B-Instruct/grpo/config_codeforces.yaml b/recipes/Qwen2.5-Coder-7B-Instruct/grpo/config_codeforces.yaml
new file mode 100644
index 000000000..cc6f95c64
--- /dev/null
+++ b/recipes/Qwen2.5-Coder-7B-Instruct/grpo/config_codeforces.yaml
@@ -0,0 +1,80 @@
+# Model arguments
+model_name_or_path: Qwen/Qwen2.5-Coder-7B-Instruct
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+# Data training arguments
+dataset_name: open-r1/codeforces
+dataset_prompt_column: prompt
+dataset_config: verifiable-prompts
+dataset_test_split: test
+dataset_train_split: train
+
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: \n...\n\n\n...\n"
+
+# GRPO trainer config
+callbacks:
+- push_to_hub_revision
+benchmarks:
+- lcb_v4
+beta: 0.0
+loss_type: dr_grpo
+scale_rewards: false
+bf16: true
+do_eval: false
+eval_strategy: "no"
+use_vllm: true
+vllm_device: auto
+vllm_gpu_memory_utilization: 0.7
+gradient_accumulation_steps: 32
+gradient_checkpointing: true
+gradient_checkpointing_kwargs:
+ use_reentrant: false
+hub_model_id: open-r1/Qwen2.5-Coder-7B-Instruct-Codeforces-GRPO
+hub_model_revision: v01.00
+hub_strategy: every_save
+learning_rate: 1.0e-06
+log_completions: true
+log_level: info
+logging_first_step: true
+logging_steps: 1
+logging_strategy: steps
+lr_scheduler_type: constant_with_warmup
+max_grad_norm: 0.2
+max_prompt_length: 2000
+max_completion_length: 8192
+max_steps: -1
+num_generations: 16
+# aiming for 1k optimization steps
+# total_samples_per_batch = num_gpus * grad_accumulation_steps * per_device_batch_size = 8 * 32 * 4 = 1024
+# unique_prompts_per_batch = total_samples_per_batch / num_generations = 1024 / 16 = 64
+# #dataset ~= 16k (8k * 2, for python and cpp)
+# global_steps_per_epoch = #dataset / unique_prompts_per_batch = 16k / 64 ~= 250
+# epochs_for_1k_steps = 1000/250 = 4 epochs
+num_train_epochs: 4
+output_dir: data/Qwen2.5-Coder-7B-Instruct-Codeforces-GRPO_v01.00
+overwrite_output_dir: true
+per_device_train_batch_size: 4
+push_to_hub: true
+report_to:
+- wandb
+reward_funcs:
+- cf_code
+- code_format
+reward_weights:
+- 1.0
+- 0.1
+save_strategy: "steps"
+save_steps: 0.05
+save_total_limit: 1
+seed: 42
+temperature: 0.7
+wandb_entity: huggingface
+wandb_project: open-r1
+warmup_ratio: 0.1
+
+mask_truncated_completions: true
+# For each generation, evaluate this many test cases in parallel, then check whether any of them failed (0 score): if so, stop evaluating;
+# otherwise continue with the next batch of test cases. Useful to avoid overloading the eval server and to save time on wrong solutions.
+code_eval_test_batch_size: -1
+code_eval_scoring_mode: weighted_sum
\ No newline at end of file
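The `num_train_epochs` above follows from the effective batch-size arithmetic in the comments. A small sanity check of those numbers (assuming 8 GPUs, as stated in the comment):

```python
# Sanity check of the batch-size arithmetic from the config comments above (assumes 8 GPUs).
num_gpus = 8
gradient_accumulation_steps = 32
per_device_train_batch_size = 4
num_generations = 16
dataset_size = 16_000  # ~8k problems, duplicated for python and cpp prompts

total_samples_per_batch = num_gpus * gradient_accumulation_steps * per_device_train_batch_size
unique_prompts_per_step = total_samples_per_batch // num_generations
steps_per_epoch = dataset_size // unique_prompts_per_step
epochs_for_1k_steps = 1000 / steps_per_epoch

print(total_samples_per_batch, unique_prompts_per_step, steps_per_epoch, epochs_for_1k_steps)
# -> 1024 64 250 4.0
```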
diff --git a/recipes/README.md b/recipes/README.md
new file mode 100644
index 000000000..4301fd66e
--- /dev/null
+++ b/recipes/README.md
@@ -0,0 +1,23 @@
+# Post-training recipes
+
+## OpenR1 Distill 7B
+
+To train the OpenR1 Distill 7B model, run:
+
+```bash
+sbatch --nodes=1 slurm/train.slurm --model OpenR1-Distill-7B --task sft --config distill --accelerator zero3
+```
+
+## OlympicCoder
+
+To train the OlympicCoder models, run:
+
+```bash
+# 7B
+sbatch --nodes=1 slurm/train.slurm --model OlympicCoder-7B --task sft --config v00.00 --accelerator zero3
+
+# 32B
+sbatch --nodes=16 slurm/train.slurm --model OlympicCoder-32B --task sft --config v00.00 --accelerator fsdp
+```
+
+Note that we found it necessary to switch to FSDP1 and paged AdamW 8-bit for the 32B model in order to fit the largest possible context size.
\ No newline at end of file
diff --git a/configs/ddp.yaml b/recipes/accelerate_configs/ddp.yaml
similarity index 100%
rename from configs/ddp.yaml
rename to recipes/accelerate_configs/ddp.yaml
diff --git a/recipes/accelerate_configs/fsdp.yaml b/recipes/accelerate_configs/fsdp.yaml
new file mode 100644
index 000000000..938427c90
--- /dev/null
+++ b/recipes/accelerate_configs/fsdp.yaml
@@ -0,0 +1,27 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+distributed_type: FSDP
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+fsdp_config:
+ fsdp_activation_checkpointing: false # Need fix from: https://github.com/huggingface/transformers/pull/36610
+ fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
+ fsdp_backward_prefetch: BACKWARD_PRE
+ fsdp_cpu_ram_efficient_loading: true
+ fsdp_forward_prefetch: true
+ fsdp_offload_params: false
+ fsdp_sharding_strategy: FULL_SHARD
+ fsdp_state_dict_type: FULL_STATE_DICT
+ fsdp_sync_module_states: true
+ fsdp_use_orig_params: true
+machine_rank: 0
+main_training_function: main
+mixed_precision: bf16
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
\ No newline at end of file
diff --git a/configs/zero2.yaml b/recipes/accelerate_configs/zero2.yaml
similarity index 100%
rename from configs/zero2.yaml
rename to recipes/accelerate_configs/zero2.yaml
diff --git a/configs/zero3.yaml b/recipes/accelerate_configs/zero3.yaml
similarity index 100%
rename from configs/zero3.yaml
rename to recipes/accelerate_configs/zero3.yaml
diff --git a/recipes/dataset_filtering/config_demo.yaml b/recipes/dataset_filtering/config_demo.yaml
new file mode 100644
index 000000000..a1168512e
--- /dev/null
+++ b/recipes/dataset_filtering/config_demo.yaml
@@ -0,0 +1,28 @@
+# Model arguments
+model_name_or_path: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+model_revision: main
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+# We edit the DeepSeek chat template to ensure (a) the reasoning block within <think> and </think> is included in the completion and (b) the <think> tag is not part of the prefill so that the format reward works
+chat_template: "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}"
+dataset_name: open-r1/OpenR1-Math-220k
+dataset_prompt_column: problem
+system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: \n...\n\n\n...\n"
+
+# Generation arguments
+max_completion_length: 2048
+num_generations: 8
+temperature: 0.7
+top_p: 0.95
+
+# Reward func arguments
+reward_funcs:
+- accuracy
+reward_weights:
+- 1.0
+
+# Filtering arguments. Samples with a pass rate outside the interval `pass_rate_min < x < pass_rate_max` will be filtered.
+pass_rate_min: 0.2
+pass_rate_max: 0.8
diff --git a/recipes/dataset_filtering/filter_dapo.yaml b/recipes/dataset_filtering/filter_dapo.yaml
new file mode 100644
index 000000000..8c8e68c93
--- /dev/null
+++ b/recipes/dataset_filtering/filter_dapo.yaml
@@ -0,0 +1,28 @@
+# Model arguments
+model_name_or_path: open-r1/R1-Distill-Qwen-Math-7B
+model_revision: v03.00-step-000008190
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+# We edit the DeepSeek chat template to ensure (a) the reasoning block within <think> and </think> is included in the completion and (b) the <think> tag is not part of the prefill so that the format reward works
+dataset_name: open-r1/DAPO-Math-17k-Processed
+dataset_config: all
+dataset_split: train
+
+# Generation arguments
+max_completion_length: 32000
+num_generations: 8
+temperature: 1.0
+
+# Reward func arguments
+reward_funcs:
+- accuracy
+reward_weights:
+- 1.0
+
+# Filtering arguments. Samples with mean reward outside of low / high will be filtered
+pass_rate_min: 0.1
+pass_rate_max: 0.6
+
+output_dataset_name: open-r1/DAPO-Math-17k-Processed-R1-Distill-Qwen-Math-7B-v03.00-step-000008190-filter
diff --git a/recipes/dataset_filtering/filter_python.yaml b/recipes/dataset_filtering/filter_python.yaml
new file mode 100644
index 000000000..ce699dcec
--- /dev/null
+++ b/recipes/dataset_filtering/filter_python.yaml
@@ -0,0 +1,26 @@
+# Model arguments
+model_name_or_path: open-r1/R1-Distill-Qwen-Math-7B-Merges
+model_revision: v00.00-step-000003660_v01.00-step-000002600_weights-0.50-0.50
+torch_dtype: bfloat16
+attn_implementation: flash_attention_2
+
+# Data training arguments
+# We edit the DeepSeek chat template to ensure (a) the reasoning block within <think> and </think> is included in the completion and (b) the <think> tag is not part of the prefill so that the format reward works
+dataset_name: open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled
+dataset_prompt_column: problem
+
+# Generation arguments
+max_completion_length: 16000
+num_generations: 8
+temperature: 0.7
+
+# Reward func arguments
+reward_funcs:
+- binary_code
+reward_weights:
+- 1.0
+e2b_router_url: ip-10-53-85-92:8000
+
+# Filtering arguments. Samples with mean reward outside of low / high will be filtered
+pass_rate_min: 0.1
+pass_rate_max: 0.6
diff --git a/scripts/benchmark_e2b.py b/scripts/benchmark_e2b.py
new file mode 100644
index 000000000..ac2fb1835
--- /dev/null
+++ b/scripts/benchmark_e2b.py
@@ -0,0 +1,85 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Benchmark script for the code_reward function with E2B.
+
+This script measures the performance of the code_reward function with varying numbers
+of samples and parallelization levels.
+
+Each sample is a CodeForces problem with a gold standard solution that is executed against a set of public test cases.
+"""
+
+from datasets import load_dataset
+import time
+from tqdm.auto import tqdm
+
+from dotenv import load_dotenv
+load_dotenv()
+
+from open_r1.rewards import code_reward
+
+def benchmark_code_reward(example):
+ start_time = time.time()
+ test_completions = [[{"content": example["gold_standard_solution"]}]]
+ reward_kwargs = {"verification_info": [example["verification_info"]]}
+ rewards = code_reward(test_completions, **reward_kwargs)
+ end_time = time.time()
+ example["test_reward"] = rewards[0]
+ example["reward_time"] = end_time - start_time
+ return example
+
+if __name__ == "__main__":
+    parallel_dict = {
+        16: [1, 4, 16],
+        64: [4, 16, 64],
+        256: [16, 64, 96],  # cap at 96 as PRO account is limited to 100
+    }
+ # Store results for table formatting
+ results = []
+
+    for num_samples in tqdm([16, 64, 256], desc="Benchmarking samples"):
+ for num_parallel in parallel_dict[num_samples]:
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated")
+ code_dataset = code_dataset["train"].shuffle(seed=42).select(range(num_samples))
+
+ test_completions = [[{"content": example["gold_standard_solution"]}] for example in code_dataset]
+ reward_kwargs = {"verification_info": [example["verification_info"] for example in code_dataset]}
+
+ start_time = time.time()
+ rewards = code_reward(test_completions, num_parallel=num_parallel, **reward_kwargs)
+ execution_time = time.time() - start_time
+
+ # Calculate some statistics about rewards
+ mean_reward = sum(rewards) / len(rewards)
+ min_reward = min(rewards)
+ max_reward = max(rewards)
+
+ # Store results
+ results.append({
+ "num_samples": num_samples,
+ "num_parallel": num_parallel,
+ "execution_time": execution_time,
+ "mean_reward": mean_reward,
+ "min_reward": min_reward,
+ "max_reward": max_reward
+ })
+
+ print("\n## Benchmark Results\n")
+ print("| Sample Size | Parallelization | Execution Time (s) | Mean Reward | Min Reward | Max Reward |")
+ print("|:-----------:|:---------------:|------------------:|:-----------:|:-----------:|:-----------:|")
+
+ for result in results:
+ print(f"| {result['num_samples']:^11} | {result['num_parallel']:^15} | {result['execution_time']:17.2f} | {result['mean_reward']:^11.4f} | {result['min_reward']:^11.4f} | {result['max_reward']:^11.4f} |")
+
diff --git a/scripts/decontaminate.py b/scripts/decontaminate.py
new file mode 100644
index 000000000..0ef13df3a
--- /dev/null
+++ b/scripts/decontaminate.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This script is used to decontaminate a dataset by checking for n-gram overlap with other datasets.
+It uses the same approach presented in https://huggingface.co/papers/2501.19393,
+as found in: https://github.com/simplescaling/s1/blob/main/data/decontaminate_util.py
+
+Usage:
+
+python scripts/decontaminate.py \
+ --dataset open-r1/verifiable-coding-problems-python \
+ --split train \
+ --ngram_size 8 \
+ --problem_column problem \
+ --cleanup
+"""
+
+import collections
+
+from tqdm import tqdm
+
+
+def normalize_string(text: str) -> str:
+ """Basic string normalization."""
+ # Convert to lowercase and normalize whitespace
+ text = text.lower().strip()
+ # Replace multiple spaces with single space
+ text = " ".join(text.split())
+ return text
+
+
+def word_ngrams(text: str, n: int) -> list:
+ """Generate word-level n-grams from text."""
+ words = text.split()
+ return [" ".join(words[i : i + n]) for i in range(len(words) - n + 1)]
+
+
+def build_ngram_lookup(documents: list[str], ngram_size: int = 8) -> dict[str, set[int]]:
+ """Build ngram lookup for documents."""
+ lookup = collections.defaultdict(set)
+
+ for doc_id, document in enumerate(tqdm(documents)):
+ normalized_text = normalize_string(document)
+ ngrams = word_ngrams(normalized_text, ngram_size)
+ for ngram in ngrams:
+ lookup[ngram].add(doc_id)
+
+ return lookup
+
+
+def build_ngram_single(document: str, ngram_size: int = 8) -> set[str]:
+ normalized_text = normalize_string(document)
+ ngrams = word_ngrams(normalized_text, ngram_size)
+
+ return set(ngrams)
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--dataset", type=str, required=True, help="Name of the dataset to check for contamination.")
+ parser.add_argument("--config", type=str, default=None, help="Name of the dataset config to load.")
+ parser.add_argument("--split", type=str, default="train", help="Split to check for contamination, defaults to `train`.")
+ parser.add_argument("--ngram_size", type=int, default=8, help="Size of n-grams to build, defaults to 8.")
+ parser.add_argument(
+ "--problem_column", type=str, default="problem", help="Name of the column containing the problem (prompt)."
+ )
+ parser.add_argument(
+ "--cleanup",
+ action="store_true",
+ help="Whether to remove the contaminated rows before pushing the dataset.",
+ )
+ parser.add_argument(
+ "--new_dataset_name",
+ type=str,
+ default=None,
+ help="New name for the dataset. If not provided, will reuse the name and add a `_decontaminated` to the name."
+ )
+ args = parser.parse_args()
+
+ from datasets import load_dataset, Dataset
+
+ # Load the dataset to check for contamination
+ ds = load_dataset(args.dataset, name=args.config, split=args.split)
+
+ eval_datasets = {
+ "aime_2024": (load_dataset("HuggingFaceH4/aime_2024", split="train"), "problem"),
+ "aime_2025": (load_dataset("yentinglin/aime_2025", split="train"), "problem"),
+ "math_500": (load_dataset("HuggingFaceH4/MATH-500", split="test"), "problem"),
+ "gpqa": (load_dataset("Idavidrein/gpqa", "gpqa_diamond", split="train", trust_remote_code=True), "Question"),
+ "lcb": (
+ load_dataset(
+ "livecodebench/code_generation_lite", split="test", version_tag="v4_v5", trust_remote_code=True
+ ),
+ "question_content",
+ ),
+ }
+ ngram_lookups = {}
+ for ds_name, (eval_dataset, problem_col) in eval_datasets.items():
+ ngram_lookups[ds_name] = build_ngram_lookup(eval_dataset[problem_col], ngram_size=args.ngram_size)
+
+ for eval_name, ngram_lookup in ngram_lookups.items():
+ # Update the ngram_lookup variable for each dataset
+ def find_contaminated(row):
+ # For each example we have to build the ngrams and check for all of them on each row
+ ngrams = build_ngram_single(row[args.problem_column], ngram_size=args.ngram_size)
+ row[f"contaminated_{eval_name}"] = any(set(ngram in ngram_lookup for ngram in ngrams))
+ return row
+
+ ds = ds.map(find_contaminated, num_proc=8)
+
+ # Allow cleaning up via CLI args (removing the contaminated examples and dropping the columns)
+ def cleanup(dataset: Dataset) -> Dataset:
+ initial_size = len(dataset)
+ contamination_cols = [col for col in dataset.column_names if col.startswith("contaminated_")]
+ for col in contamination_cols:
+ if col.startswith("contaminated_"):
+ size_prior = len(dataset)
+ dataset = dataset.filter(lambda x: not x[col], num_proc=8)
+ if len(dataset) < size_prior:
+ print(f"Removed {size_prior - len(dataset)} samples from '{col.replace('contaminated_', '')}'")
+ dataset = dataset.remove_columns(contamination_cols)
+ print(f"Initial size: {initial_size}, Final size: {len(dataset)}")
+ return dataset
+
+ if args.cleanup:
+ ds = cleanup(ds)
+
+ new_ds_name = args.new_dataset_name or f"{args.dataset}_decontaminated"
+ config_name = args.config if args.config is not None else "default"
+ url = ds.push_to_hub(new_ds_name, config_name=config_name, split="train")
+ print(f"Decontaminated dataset: {url}")
diff --git a/scripts/e2b_router.py b/scripts/e2b_router.py
new file mode 100644
index 000000000..9cc25603c
--- /dev/null
+++ b/scripts/e2b_router.py
@@ -0,0 +1,161 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+from typing import Optional
+
+import uvicorn
+from dotenv import load_dotenv
+from e2b_code_interpreter import AsyncSandbox
+from e2b_code_interpreter.models import Execution
+from fastapi import FastAPI, Request
+from pydantic import BaseModel, ConfigDict
+
+load_dotenv()
+
+class BatchRequest(BaseModel):
+ """
+ BatchRequest is a data model representing a batch processing request.
+
+ Attributes:
+ scripts (list[str]): A list of script names or paths to be executed.
+ languages (list[str]): The programming languages for each script in the list.
+ timeout (int): The maximum allowed execution time for each script in seconds.
+ request_timeout (int): The maximum allowed time for the entire batch request in seconds.
+ """
+ scripts: list[str]
+ languages: list[str]
+ timeout: int
+ request_timeout: int
+
+class ScriptResult(BaseModel):
+ """
+ ScriptResult is a Pydantic model that represents the result of a script execution.
+ Attributes:
+ execution (Optional[Execution]): An optional instance of the `Execution` class
+ that contains details about the script's execution, such as status, output,
+ or any other relevant metadata.
+ exception_str (Optional[str]): An optional string that captures the exception
+ message or details if an error occurred during the script's execution.
+ model_config (ConfigDict): A configuration dictionary that allows arbitrary
+ types to be used within the Pydantic model. This is necessary to support
+ custom types like `Execution` within the model.
+ """
+ execution: Optional[Execution]
+ exception_str: Optional[str]
+
+ # required to allow arbitrary types in pydantic models such as Execution
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+
+def create_app(args):
+ """
+ Creates and configures a FastAPI application instance.
+ Args:
+ args: An object containing configuration parameters for the application.
+ - num_sandboxes (int): The maximum number of concurrent sandboxes allowed.
+ Returns:
+ FastAPI: A configured FastAPI application instance.
+ The application includes the following endpoints:
+ 1. GET /health:
+ - Returns the health status of the application.
+ - Response: {"status": "ok"}
+ 2. POST /execute_batch:
+ - Executes a batch of scripts in an isolated sandbox environment.
+ - Request Body: BatchRequest object containing:
+ - languages (list[str]): The programming languages of the scripts (python or javascript).
+ - timeout (int): The maximum execution time for each script.
+ - request_timeout (int): The timeout for the request itself.
+ - scripts (List[str]): A list of scripts to execute.
+ - Response: A list of ScriptResult objects for each script, containing:
+ - execution: The result of the script execution.
+ - exception_str: Any exception encountered during execution.
+ Notes:
+ - A semaphore is used to limit the number of concurrent sandboxes.
+ - Each script execution is wrapped in a timeout to prevent hanging.
+ - Sandboxes are cleaned up after execution, even in case of errors.
+ """
+ app = FastAPI()
+
+ # Instantiate semaphore and attach it to app state
+ app.state.sandbox_semaphore = asyncio.Semaphore(args.max_num_sandboxes)
+
+ @app.get("/health")
+ async def health():
+ return {"status": "ok"}
+
+ @app.post("/execute_batch")
+ async def execute_batch(batch: BatchRequest, request: Request):
+ semaphore = request.app.state.sandbox_semaphore
+ languages = batch.languages
+ timeout = batch.timeout
+ request_timeout = batch.request_timeout
+ asyncio_timeout = batch.timeout + 1
+
+        async def run_script(script: str, language: str) -> ScriptResult:
+            sandbox = None
+            async with semaphore:
+                try:
+                    sandbox = await AsyncSandbox.create(
+                        timeout=timeout,
+                        request_timeout=request_timeout,
+                    )
+                    execution = await asyncio.wait_for(
+                        sandbox.run_code(script, language=language),
+                        timeout=asyncio_timeout,
+                    )
+                    return ScriptResult(execution=execution, exception_str=None)
+
+                except Exception as e:
+                    return ScriptResult(execution=None, exception_str=str(e))
+
+                finally:
+                    # Guard against the case where sandbox creation itself failed
+                    if sandbox is not None:
+                        try:
+                            await sandbox.kill()
+                        except Exception:
+                            pass
+
+ tasks = [run_script(script, lang) for script, lang in zip(batch.scripts, batch.languages)]
+ return await asyncio.gather(*tasks)
+
+ return app
+
+
+def parse_args():
+ """
+ Parse command-line arguments for the e2b_router script.
+
+ Arguments:
+ --host (str): The hostname or IP address to bind the server to. Defaults to "0.0.0.0" (binds to all interfaces).
+ --port (int): The port number on which the server will listen. Defaults to 8000.
+ --max_num_sandboxes (int): The maximum number of sandboxes that can be created or managed simultaneously. Defaults to 20.
+
+ Returns:
+ argparse.Namespace: Parsed command-line arguments as an object.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", default="0.0.0.0")
+ parser.add_argument("--port", type=int, default=8000)
+ parser.add_argument("--max_num_sandboxes", type=int, default=20)
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = parse_args()
+ app = create_app(args)
+
+ uvicorn.run(app, host=args.host, port=args.port)
\ No newline at end of file
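For reference, a minimal sketch of how a client could call the router above, assuming it was started with the default host and port (`localhost:8000`). Field names follow the `BatchRequest`/`ScriptResult` models defined in this file; the snippet is illustrative and is not the trainer-side integration, which lives in the reward functions.

```python
# Minimal illustrative client for the E2B router above (not the project's client code).
import requests

ROUTER_URL = "http://localhost:8000"  # assumption: router started with default host/port

payload = {
    "scripts": ["print(sum(range(10)))"],
    "languages": ["python"],
    "timeout": 30,           # per-script execution timeout (seconds)
    "request_timeout": 30,   # sandbox request timeout (seconds)
}

resp = requests.post(f"{ROUTER_URL}/execute_batch", json=payload, timeout=120)
resp.raise_for_status()
for result in resp.json():
    if result["exception_str"] is not None:
        print("Execution failed:", result["exception_str"])
    else:
        print("Execution result:", result["execution"])
```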
diff --git a/scripts/generate_reasoning.py b/scripts/generate_reasoning.py
new file mode 100644
index 000000000..11bdb7e88
--- /dev/null
+++ b/scripts/generate_reasoning.py
@@ -0,0 +1,174 @@
+import argparse
+import asyncio
+import hashlib
+import json
+import os
+import random
+from asyncio import Lock
+from typing import Set
+
+from datasets import load_dataset
+from tqdm.asyncio import tqdm
+
+import aiofiles
+import aiohttp
+import uvloop
+
+
+file_lock = Lock()
+
+
+async def generate_completion(session, prompt, args):
+ retry_budget = 10
+ while retry_budget > 0:
+ try:
+ await asyncio.sleep(random.uniform(0.0, 0.1))
+ async with session.post(
+ f"http://{args.api_addr}/v1/chat/completions",
+ json={
+ "model": "default",
+ "messages": [{"role": "user", "content": prompt}],
+ "max_tokens": args.max_tokens,
+ "temperature": args.temperature,
+ "top_p": args.top_p,
+ },
+ headers={"Authorization": "Bearer EMPTY"},
+ ) as response:
+ return await response.json(content_type=None)
+ except Exception as e:
+ print(f"API error (will retry): {e}")
+ retry_budget -= 1
+ await asyncio.sleep(10)
+ return None
+
+
+async def process_example(example, session, args, output_file, pbar):
+ prompt = args.prompt_template.format(prompt=example[args.prompt_column])
+
+ try:
+ tasks = [generate_completion(session, prompt, args) for _ in range(args.num_generations)]
+
+ completions = await asyncio.gather(*tasks)
+
+ if any(completion is None for completion in completions):
+ print(f"Error processing example")
+ pbar.update(1)
+ return None
+
+ generations = []
+ finish_reasons = []
+ api_metadata = []
+
+ for completion in completions:
+ generations.append(completion["choices"][0]["message"]["content"])
+ finish_reasons.append(completion["choices"][0]["finish_reason"])
+ api_metadata.append(completion["usage"])
+
+ # Combine original dataset fields with generations
+ result = {
+ **example, # Preserve all original dataset fields
+ "generations": generations,
+ "finish_reasons": finish_reasons,
+ "api_metadata": api_metadata,
+ }
+
+ # Write to file with lock
+ async with file_lock:
+ async with aiofiles.open(output_file, mode="a") as f:
+ await f.write(json.dumps(result) + "\n")
+ await f.flush()
+
+ pbar.set_postfix(active=len(pbar.active_tasks), refresh=False)
+ pbar.update(1)
+
+ return result
+ except Exception as e:
+ print(f"Error processing example: {e}")
+ pbar.update(1)
+ return None
+
+
+async def load_processed_uuids(output_file, uuid_column):
+ processed_uuids = set()
+ if os.path.exists(output_file):
+ async with aiofiles.open(output_file, mode="r") as f:
+ async for line in f:
+ try:
+ data = json.loads(line)
+ processed_uuids.add(hashlib.md5(str(data[uuid_column]).encode()).hexdigest())
+ except json.JSONDecodeError:
+ continue
+ return processed_uuids
+
+
+async def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--dataset-name", type=str, required=True)
+ parser.add_argument("--output-file", type=str, required=True)
+ parser.add_argument("--prompt-column", type=str, required=True)
+ parser.add_argument("--uuid-column", type=str, required=True)
+ parser.add_argument("--api-addr", type=str, default="localhost:39876")
+ parser.add_argument("--num-generations", type=int, default=4)
+ parser.add_argument(
+ "--prompt-template",
+ type=str,
+ default="You will be given a problem. Please reason step by step, and put your final answer within \\boxed{{}}:\n{prompt}",
+ )
+ parser.add_argument("--temperature", type=float, default=0.6)
+ parser.add_argument("--top-p", type=float, default=0.95)
+ parser.add_argument("--max-tokens", type=int, default=16384)
+ parser.add_argument("--max-concurrent", type=int, default=1000)
+ args = parser.parse_args()
+
+ dataset = load_dataset(args.dataset_name, split="train").shuffle()
+ processed_uuids = await load_processed_uuids(args.output_file, args.uuid_column)
+ if processed_uuids:
+ print(f"Found {len(processed_uuids)} already processed examples, resuming from there...")
+
+ if not os.path.exists(args.output_file):
+ async with aiofiles.open(args.output_file, mode="w") as f:
+ await f.write("")
+
+ active_tasks: Set[asyncio.Task] = set()
+
+ pbar = tqdm(
+ total=len(dataset) - len(processed_uuids),
+ desc="Generating responses",
+ unit="row",
+ mininterval=2,
+ smoothing=0.0001,
+ )
+ pbar.active_tasks = active_tasks
+
+ async with aiohttp.ClientSession(
+ timeout=aiohttp.ClientTimeout(total=60 * 60),
+ connector=aiohttp.TCPConnector(limit=args.max_concurrent, ttl_dns_cache=300, keepalive_timeout=60 * 60),
+ ) as session:
+ for example in dataset:
+ uuid = hashlib.md5(str(example[args.uuid_column]).encode()).hexdigest()
+ if uuid not in processed_uuids:
+ # Wait if we've hit the concurrency limit
+ while len(active_tasks) >= args.max_concurrent:
+ done, active_tasks = await asyncio.wait(active_tasks, return_when=asyncio.FIRST_COMPLETED)
+ for task in done:
+ try:
+ await task
+ except Exception as e:
+ print(f"Task failed: {e}")
+
+ task = asyncio.create_task(process_example(example, session, args, args.output_file, pbar))
+ active_tasks.add(task)
+ task.add_done_callback(active_tasks.discard)
+
+ pbar.set_postfix(active=len(active_tasks), refresh=True)
+
+ # Wait for remaining tasks
+ if active_tasks:
+ await asyncio.gather(*active_tasks, return_exceptions=True)
+
+ pbar.close()
+
+
+if __name__ == "__main__":
+ uvloop.install()
+ asyncio.run(main())
diff --git a/scripts/get_tensor_parallel_size.py b/scripts/get_tensor_parallel_size.py
new file mode 100644
index 000000000..d6c61154c
--- /dev/null
+++ b/scripts/get_tensor_parallel_size.py
@@ -0,0 +1,28 @@
+import argparse
+from transformers import AutoConfig
+from math import gcd
+
+def get_tensor_parallel_size(model_name: str, revision: str = None, default_tp: int = 8) -> int:
+ try:
+ config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=True)
+ num_heads = getattr(config, 'num_attention_heads', None)
+
+ if num_heads is not None and num_heads % default_tp != 0:
+ tp = gcd(num_heads, default_tp)
+ return max(tp, 1)
+ else:
+ return default_tp
+ except Exception as e:
+ print(f"Warning: Failed to fetch config for {model_name}@{revision}: {e}")
+ return default_tp
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model_name", type=str, required=True, help="Hugging Face model name or path")
+ parser.add_argument("--revision", type=str, default=None, help="Model revision if applicable")
+ parser.add_argument("--default_tp", type=int, default=8, help="Default TP size (usually GPUs per node)")
+
+ args = parser.parse_args()
+
+ tp = get_tensor_parallel_size(args.model_name, args.revision, args.default_tp)
+ print(tp)
diff --git a/scripts/morph_router.py b/scripts/morph_router.py
new file mode 100644
index 000000000..166b9cce1
--- /dev/null
+++ b/scripts/morph_router.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import asyncio
+import os
+from typing import List, Optional
+
+import uvicorn
+from dotenv import load_dotenv
+from fastapi import FastAPI, Request
+from pydantic import BaseModel, ConfigDict
+
+load_dotenv()
+
+class BatchRequest(BaseModel):
+ """
+ BatchRequest is a data model representing a batch processing request.
+
+ Attributes:
+ scripts (list[str]): A list of script names or paths to be executed.
+ languages (List[str]): The programming languages for each script in the list.
+ timeout (int): The maximum allowed execution time for each script in seconds.
+ request_timeout (int): The maximum allowed time for the entire batch request in seconds.
+ """
+ scripts: List[str]
+ languages: List[str]
+ timeout: int
+ request_timeout: int
+
+class ScriptResult(BaseModel):
+ """
+ ScriptResult is a Pydantic model that represents the result of a script execution.
+ Attributes:
+ text (Optional[str]): The output text from the script execution.
+ exception_str (Optional[str]): An optional string that captures the exception
+ message or details if an error occurred during the script's execution.
+ model_config (ConfigDict): A configuration dictionary that allows arbitrary
+ types to be used within the Pydantic model.
+ """
+ text: Optional[str]
+ exception_str: Optional[str]
+
+
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+
+def create_app(args):
+ """
+ Creates and configures a FastAPI application instance for the MorphCloud router.
+
+ Args:
+ args: An object containing configuration parameters for the application.
+ - max_num_sandboxes (int): The maximum number of concurrent sandboxes allowed.
+ - api_key (str): The MorphCloud API key to use.
+
+ Returns:
+ FastAPI: A configured FastAPI application instance.
+ """
+ app = FastAPI()
+
+ from morphcloud.api import MorphCloudClient
+ from morphcloud.sandbox import Sandbox
+
+ app.state.client = MorphCloudClient(api_key=args.api_key)
+ app.state.Sandbox = Sandbox
+
+ app.state.sandbox_semaphore = asyncio.Semaphore(args.max_num_sandboxes)
+
+ @app.get("/health")
+ async def health():
+ return {"status": "ok"}
+
+ @app.post("/execute_batch")
+ async def execute_batch(batch: BatchRequest, request: Request):
+ semaphore = request.app.state.sandbox_semaphore
+ client = request.app.state.client
+ Sandbox = request.app.state.Sandbox
+
+ languages = batch.languages
+ timeout = batch.timeout
+ request_timeout = batch.request_timeout
+ asyncio_timeout = batch.timeout + 1
+
+ async def run_script(script: str, language: str) -> ScriptResult:
+ sandbox = None
+ sandbox_id = "unknown"
+
+ async with semaphore:
+ try:
+ sandbox = await asyncio.to_thread(
+ Sandbox.new,
+ client=client,
+ ttl_seconds=timeout
+ )
+
+ sandbox_id = getattr(sandbox, 'id', None) or getattr(sandbox._instance, 'id', 'unknown')
+
+ execution = await asyncio.wait_for(
+ asyncio.to_thread(
+ sandbox.run_code,
+ script,
+ language=language,
+ timeout=timeout * 1000
+ ),
+ timeout=asyncio_timeout,
+ )
+
+ if hasattr(execution, 'text') and execution.text:
+ return ScriptResult(text=execution.text, exception_str=None)
+ elif hasattr(execution, 'stdout') and execution.stdout:
+ return ScriptResult(text=execution.stdout, exception_str=None)
+ else:
+ return ScriptResult(text="", exception_str="No output from execution")
+
+ except Exception as e:
+ return ScriptResult(text=None, exception_str=str(e))
+
+ finally:
+ if sandbox:
+ try:
+ await asyncio.to_thread(sandbox.close)
+ await asyncio.to_thread(sandbox.shutdown)
+ except Exception:
+ pass
+
+ tasks = [run_script(script, lang) for script, lang in zip(batch.scripts, batch.languages)]
+ return await asyncio.gather(*tasks)
+
+ return app
+
+def parse_args():
+ """
+ Parse command-line arguments for the morph_router script.
+
+ Arguments:
+ --host (str): The hostname or IP address to bind the server to. Defaults to "0.0.0.0".
+ --port (int): The port number on which the server will listen. Defaults to 8001.
+ --max_num_sandboxes (int): The maximum number of sandboxes that can be created simultaneously. Defaults to 20.
+ --api_key (str): The MorphCloud API key. If not provided, it will be read from the MORPH_API_KEY environment variable.
+
+ Returns:
+ argparse.Namespace: Parsed command-line arguments as an object.
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--host", default="0.0.0.0")
+ parser.add_argument("--port", type=int, default=8001)
+ parser.add_argument("--max_num_sandboxes", type=int, default=20)
+ parser.add_argument("--api_key", default=os.getenv("MORPH_API_KEY"))
+ args = parser.parse_args()
+
+ if not args.api_key:
+ raise ValueError("MorphCloud API key not provided. Please set MORPH_API_KEY environment variable or use --api_key.")
+
+ return args
+
+if __name__ == "__main__":
+ args = parse_args()
+ app = create_app(args)
+
+ print(f"Starting MorphCloud Router on {args.host}:{args.port}")
+ uvicorn.run(app, host=args.host, port=args.port)
\ No newline at end of file
diff --git a/scripts/pass_rate_filtering/README.md b/scripts/pass_rate_filtering/README.md
new file mode 100644
index 000000000..0c2fd88fb
--- /dev/null
+++ b/scripts/pass_rate_filtering/README.md
@@ -0,0 +1,36 @@
+# Pass rate filtering
+
+We provide support for filtering datasets by generating completions and computing the pass rate on verifiable tasks.
+
+See `scripts/pass_rate_filtering/compute_pass_rate.py` and `scripts/pass_rate_filtering/launch_filtering.sh` (hardcoded for DAPO at the moment)
+
+By default the script processes the dataset in chunks; the resulting chunks can be merged with the following snippet (example for DAPO):
+
+```python
+from datasets import load_dataset, concatenate_datasets
+
+name = "open-r1/DAPO-Math-17k-Processed-R1-Distill-Qwen-Math-7B-Merges-v00.02-v01.02-0.3-0.7-filter"
+
+gen_datasets = []
+filt_datasets = []
+for start in range(0,17400,200):
+ end = start + 200
+ if start == 17200:
+ end = 17398
+ gen_config_name = f"gen-{start}-{end}"
+ gen_dataset = load_dataset(name, gen_config_name, revision="gen", split="train")
+ gen_datasets.append(gen_dataset)
+
+ filt_config_name = f"filt-0.1-0.6-{start}-{end}"
+ filt_dataset = load_dataset(name, filt_config_name, revision="pass_rate", split="train")
+ filt_datasets.append(filt_dataset)
+
+gen_dataset = concatenate_datasets(gen_datasets)
+gen_dataset.push_to_hub(name, config_name="gen", split="train")
+print(gen_dataset)
+
+filt_dataset = concatenate_datasets(filt_datasets)
+filt_dataset.push_to_hub(name, config_name="default", split="train")
+
+print(filt_dataset)
+```
\ No newline at end of file
diff --git a/scripts/pass_rate_filtering/compute_pass_rate.py b/scripts/pass_rate_filtering/compute_pass_rate.py
new file mode 100644
index 000000000..dcc5286d3
--- /dev/null
+++ b/scripts/pass_rate_filtering/compute_pass_rate.py
@@ -0,0 +1,205 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# example usage: python scripts/pass_rate_filtering/compute_pass_rate.py --config recipes/dataset_filtering/config_demo.yaml
+
+import logging
+import sys
+from dataclasses import dataclass
+from typing import Optional
+
+import torch
+
+import datasets
+import transformers
+from datasets import load_dataset
+from transformers import set_seed
+
+from open_r1.configs import GRPOConfig, GRPOScriptArguments
+from open_r1.rewards import get_reward_funcs
+from open_r1.utils import get_tokenizer
+from trl import ModelConfig, TrlParser
+from trl.data_utils import apply_chat_template
+from vllm import LLM, SamplingParams
+
+logger = logging.getLogger(__name__)
+
+@dataclass
+class PassRateScriptArguments(GRPOScriptArguments):
+ # we can be lazy and just use the same script args as GRPO
+ output_dataset_name: Optional[str] = None
+ pass_rate_min: float = 0.1
+ pass_rate_max: float = 0.9
+ dataset_start_index: Optional[int] = None
+ dataset_end_index: Optional[int] = None
+ dataset_split: str = "train"
+
+
+def main(script_args, training_args, model_args):
+ # Set seed for reproducibility
+ set_seed(training_args.seed)
+
+ ###############
+ # Setup logging
+ ###############
+ logging.basicConfig(
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ handlers=[logging.StreamHandler(sys.stdout)],
+ )
+ log_level = training_args.get_process_log_level()
+ logger.setLevel(log_level)
+ datasets.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.enable_default_handler()
+ transformers.utils.logging.enable_explicit_format()
+
+ logger.info(f"Model parameters {model_args}")
+ logger.info(f"Script parameters {script_args}")
+ logger.info(f"Training parameters {training_args}")
+
+ # Load the dataset
+ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config, split=script_args.dataset_split)
+ if script_args.dataset_start_index is not None and script_args.dataset_end_index is not None:
+ dataset = dataset.select(range(script_args.dataset_start_index, script_args.dataset_end_index))
+
+ # Get reward functions from the registry
+ reward_funcs = get_reward_funcs(script_args)
+
+ # Format into conversation
+ def make_conversation(example, prompt_column: str = script_args.dataset_prompt_column):
+ example["prompt_backup"] = example[prompt_column]
+
+ prompt = []
+
+ if training_args.system_prompt is not None:
+ prompt.append({"role": "system", "content": training_args.system_prompt})
+
+ if prompt_column not in example:
+ raise ValueError(f"Dataset Question Field Error: {prompt_column} is not supported.")
+
+ prompt.append({"role": "user", "content": example[prompt_column]})
+ return {"prompt": prompt}
+
+ dataset = dataset.map(make_conversation)
+ tokenizer = get_tokenizer(model_args, training_args)
+
+ if "messages" in dataset.column_names:
+ dataset = dataset.remove_columns("messages")
+
+ dataset = dataset.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer})
+ llm = LLM(
+ model=model_args.model_name_or_path,
+ revision=model_args.model_revision,
+ trust_remote_code=model_args.trust_remote_code,
+ )
+
+    sampling_params = SamplingParams(
+ temperature=training_args.temperature,
+ top_p=training_args.top_p,
+ top_k=training_args.top_k,
+ n=training_args.num_generations,
+ max_tokens=training_args.max_completion_length,
+ )
+
+ def batch_score(examples):
+ prompts = examples["prompt"]
+
+ outputs = llm.generate(
+ prompts,
+ sampling_params=sampling_params,
+ use_tqdm=False,
+ )
+ repeated_prompts = []
+ reward_completions = []
+ grouped_completions = []
+ for output in outputs:
+ prompt = output.prompt
+ group = []
+ for completion in output.outputs:
+ text = completion.text
+ group.append(text)
+ message = [{"role": "assistant", "content": text}]
+ repeated_prompts.append(prompt)
+ reward_completions.append(message)
+ grouped_completions.append(group)
+
+ def repeat_each_element_k_times(list_to_repeat: list, k: int) -> list:
+ return [element for item in list_to_repeat for element in [item] * k]
+
+ rewards_per_func = torch.zeros(len(repeated_prompts), len(reward_funcs))
+ for i, reward_func in enumerate(reward_funcs):
+ keys = [key for key in examples.data.keys() if key not in ["prompt", "completion"]]
+ reward_kwargs = {key: repeat_each_element_k_times(examples[key], training_args.num_generations) for key in keys}
+ output_reward_func = reward_func(prompts=repeated_prompts, completions=reward_completions, **reward_kwargs)
+ # Convert None values to NaN
+ output_reward_func = [reward if reward is not None else torch.nan for reward in output_reward_func]
+
+ rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32)
+
+ reshaped_rewards = rewards_per_func.view(-1, training_args.num_generations)
+
+ examples["pass_rate_generations"] = grouped_completions
+ examples["pass_rate_rewards"] = reshaped_rewards.tolist()
+
+ return examples
+
+ dataset = dataset.map(batch_score, batched=True, batch_size=64)
+
+ # we need to restore the prompt for the final dataset
+ def restore_prompt(example):
+ example["prompt"] = example["prompt_backup"]
+ return example
+
+ dataset = dataset.map(restore_prompt)
+ dataset = dataset.remove_columns("prompt_backup")
+
+ if script_args.output_dataset_name is not None:
+ output_dataset_name = script_args.output_dataset_name
+ else:
+ model_name = model_args.model_name_or_path
+ if "/" in model_name:
+ model_name = model_name.split("/")[-1]
+ model_revision = model_args.model_revision
+
+ output_dataset_name = f"{script_args.dataset_name}-{model_name}-{model_revision}-gen"
+
+ config_name="default"
+ filtered_config_name = f"filt-{script_args.pass_rate_min}-{script_args.pass_rate_max}"
+
+ if script_args.dataset_start_index is not None and script_args.dataset_end_index is not None:
+ config_name = f"gen-{script_args.dataset_start_index}-{script_args.dataset_end_index}"
+ filtered_config_name = f"{filtered_config_name}-{script_args.dataset_start_index}-{script_args.dataset_end_index}"
+
+ dataset.push_to_hub(output_dataset_name, config_name=config_name, revision="gen")
+
+ def filter_func(example):
+ rewards = example["pass_rate_rewards"]
+ # get the mean of the rewards that are not None
+ mean_reward = torch.nanmean(torch.tensor(rewards, dtype=torch.float32))
+
+ return script_args.pass_rate_min < mean_reward < script_args.pass_rate_max
+
+ logger.info(f"Filtering dataset with low reward threshold {script_args.pass_rate_min} and high reward threshold {script_args.pass_rate_max}")
+ logger.info(f"Dataset size before filtering: {dataset}")
+ dataset = dataset.filter(filter_func)
+ logger.info(f"Dataset size after filtering: {dataset}")
+ dataset.push_to_hub(output_dataset_name, config_name=filtered_config_name, revision="pass_rate")
+
+
+
+if __name__ == "__main__":
+ parser = TrlParser((PassRateScriptArguments, GRPOConfig, ModelConfig))
+ script_args, training_args, model_args = parser.parse_args_and_config()
+ main(script_args, training_args, model_args)
diff --git a/scripts/pass_rate_filtering/launch_filtering.sh b/scripts/pass_rate_filtering/launch_filtering.sh
new file mode 100644
index 000000000..be357d0a6
--- /dev/null
+++ b/scripts/pass_rate_filtering/launch_filtering.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# Loop over the dataset in chunks of 200 samples and submit one SLURM job per chunk (the final chunk below covers 17200-17398)
+
+for i in {0..17000..200}
+do
+ START=$i
+ END=$((i + 200))
+ echo "Processing chunk from $START to $END"
+
+ # Submit the job to SLURM
+ sbatch slurm/compute_pass_rate.slurm recipes/dataset_filtering/filter_dapo.yaml $START $END
+done
+
+sbatch slurm/compute_pass_rate.slurm recipes/dataset_filtering/filter_dapo.yaml 17200 17398
diff --git a/scripts/run_benchmarks.py b/scripts/run_benchmarks.py
index 3de4cf41e..b7395947a 100644
--- a/scripts/run_benchmarks.py
+++ b/scripts/run_benchmarks.py
@@ -15,7 +15,7 @@
from typing import List, Optional
from open_r1.utils.evaluation import SUPPORTED_BENCHMARKS, run_benchmark_jobs
-from open_r1.configs import SFTConfig, GRPOConfig
+from open_r1.configs import SFTConfig
from trl import ModelConfig, TrlParser
@@ -25,18 +25,14 @@ class ScriptArguments:
default="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
metadata={"help": "The Hub model id to push the model to."},
)
- model_revision: str = field(
- default="main",
- metadata={"help": "The Hub model branch to push the model to."},
- )
+ model_revision: str = field(default="main", metadata={"help": "The Hub model branch to push the model to."})
trust_remote_code: bool = field(default=False, metadata={"help": "Trust the remote code."})
benchmarks: List[str] = field(
- default_factory=lambda: [], metadata={"help": ("The benchmarks to run after training.")}
+ default_factory=lambda: [], metadata={"help": "The benchmarks to run after training."}
)
list_benchmarks: bool = field(default=False, metadata={"help": "List all supported benchmarks."})
system_prompt: Optional[str] = field(
- default=None,
- metadata={"help": "The system prompt to use for the benchmark."},
+ default=None, metadata={"help": "The system prompt to use for the benchmark."}
)
@@ -56,7 +52,8 @@ def main():
system_prompt=args.system_prompt,
)
run_benchmark_jobs(
- benchmark_args, ModelConfig(model_name_or_path="", model_revision="", trust_remote_code=args.trust_remote_code)
+ benchmark_args,
+ ModelConfig(model_name_or_path="", model_revision="", trust_remote_code=args.trust_remote_code),
)
diff --git a/src/open_r1/utils/upload_details.py b/scripts/upload_details.py
similarity index 97%
rename from src/open_r1/utils/upload_details.py
rename to scripts/upload_details.py
index 273e48bdb..caa491cfa 100644
--- a/src/open_r1/utils/upload_details.py
+++ b/scripts/upload_details.py
@@ -39,7 +39,7 @@ class ScriptArguments:
def main():
parser = HfArgumentParser(ScriptArguments)
- args = parser.parse()
+ args = parser.parse_args_into_dataclasses()[0]
if all(file.endswith(".json") for file in args.data_files):
ds = load_dataset("json", data_files=args.data_files)
diff --git a/setup.py b/setup.py
index e1ea11903..a88508b94 100644
--- a/setup.py
+++ b/setup.py
@@ -39,32 +39,41 @@
# IMPORTANT: all dependencies should be listed here with their version requirements, if any.
-# * If a dependency is fast-moving (e.g. transformers), pin to the exact version
+# * If a dependency is fast-moving (e.g. trl), pin to the exact version
_deps = [
- "accelerate>=1.2.1",
+ "accelerate==1.4.0",
"bitsandbytes>=0.43.0",
- "black>=24.4.2",
"datasets>=3.2.0",
- "deepspeed==0.15.4",
+ "deepspeed==0.16.8",
"distilabel[vllm,ray,openai]>=1.5.2",
+ "e2b-code-interpreter>=1.0.5",
"einops>=0.8.0",
"flake8>=6.0.0",
"hf_transfer>=0.1.4",
- "huggingface-hub[cli]>=0.19.2,<1.0",
+ "huggingface-hub[cli,hf_xet]>=0.30.2,<1.0",
"isort>=5.12.0",
- "liger_kernel==0.5.2",
- "lighteval @ git+https://github.com/huggingface/lighteval.git@0e462692436e1f0575bdb4c6ef63453ad9bde7d4#egg=lighteval[math]",
- "math-verify>=0.3.3", # Used for math verification in grpo
+ "jieba", # Needed for Chinese language support
+ "langdetect", # Needed for LightEval's extended tasks
+ "latex2sympy2_extended>=1.0.6",
+ "liger-kernel>=0.5.10",
+ "lighteval @ git+https://github.com/huggingface/lighteval.git@d3da6b9bbf38104c8b5e1acc86f83541f9a502d1", # Critical bug fix for tokenizer revisions: https://github.com/huggingface/lighteval/pull/721
+ "math-verify==0.5.2", # Used for math verification in grpo
+ "morphcloud==0.1.67",
"packaging>=23.0",
"parameterized>=0.9.0",
+ "peft>=0.14.0",
"pytest",
+ "python-dotenv",
+ "ruff>=0.9.0",
"safetensors>=0.3.3",
"sentencepiece>=0.1.99",
- "torch>=2.5.1",
- "transformers @ git+https://github.com/huggingface/transformers.git@main",
- "trl @ git+https://github.com/huggingface/trl.git@main",
- "vllm>=0.7.0",
+ "torch==2.6.0",
+ "transformers==4.52.3",
+ "trl[vllm]==0.18.0",
"wandb>=0.19.1",
+ "async-lru>=2.0.5",
+ "aiofiles>=24.1.0",
+ "pandas>=2.2.3",
]
# this is a lookup table with items like:
@@ -81,11 +90,12 @@ def deps_list(*pkgs):
extras = {}
-extras["tests"] = deps_list("pytest", "parameterized")
+extras["tests"] = deps_list("pytest", "parameterized", "math-verify", "jieba")
extras["torch"] = deps_list("torch")
-extras["quality"] = deps_list("black", "isort", "flake8")
+extras["quality"] = deps_list("ruff", "isort", "flake8")
+extras["code"] = deps_list("e2b-code-interpreter", "python-dotenv", "morphcloud", "jieba", "pandas", "aiofiles")
extras["eval"] = deps_list("lighteval", "math-verify")
-extras["dev"] = extras["quality"] + extras["tests"] + extras["eval"]
+extras["dev"] = extras["quality"] + extras["tests"] + extras["eval"] + extras["code"]
# core dependencies shared across the whole project - keep this to a bare minimum :)
install_requires = [
@@ -96,12 +106,17 @@ def deps_list(*pkgs):
deps["deepspeed"],
deps["hf_transfer"],
deps["huggingface-hub"],
- deps["liger_kernel"],
+ deps["langdetect"],
+ deps["latex2sympy2_extended"],
+ deps["math-verify"],
+ deps["liger-kernel"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["safetensors"],
deps["sentencepiece"],
deps["transformers"],
deps["trl"],
+ deps["wandb"],
+ deps["async-lru"],
]
setup(
diff --git a/slurm/README.md b/slurm/README.md
new file mode 100644
index 000000000..029d633d3
--- /dev/null
+++ b/slurm/README.md
@@ -0,0 +1,30 @@
+## Serving DeepSeek-R1 on 2x8 H100 SLURM nodes with SGLang
+
+1. Set up the environment (adjust for your cuda version):
+```bash
+conda create -n sglang124 python=3.11
+conda activate sglang124
+
+pip install torch==2.5.1 --index-url https://download.pytorch.org/whl/cu124
+
+pip install sgl-kernel --force-reinstall --no-deps
+pip install "sglang[all]>=0.4.2.post4" --find-links https://flashinfer.ai/whl/cu124/torch2.5/flashinfer/
+```
+
+2. Run the server and wait for the model to load:
+```bash
+sbatch slurm/serve_r1.slurm -m "/fsx/deepseek-r1-checkpoint" -e "sglang124"
+```
+
+3. Run the data generation script:
+```bash
+python scripts/generate_reasoning.py \
+ --dataset-name "AI-MO/NuminaMath-1.5" \
+ --output-file "numinamath_r1_generations.jsonl" \
+ --prompt-column "problem" \
+ --uuid-column "problem" \
+ --api-addr ":39877" \
+ --num-generations 2 \
+ --max-tokens 16384 \
+ --max-concurrent 200
+```
\ No newline at end of file
diff --git a/slurm/compute_pass_rate.slurm b/slurm/compute_pass_rate.slurm
new file mode 100644
index 000000000..2c1cc54e7
--- /dev/null
+++ b/slurm/compute_pass_rate.slurm
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+#SBATCH --job-name=open-r1-compute-pass-rate
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=normal
+#SBATCH --nodes=1
+#SBATCH --gpus-per-node=1
+#SBATCH --output=./logs/%x-%j.out
+#SBATCH --error=./logs/%x-%j.err
+#SBATCH --time=01-00:00:00
+#SBATCH --requeue
+
+# example usage: sbatch slurm/dataset_filter.slurm recipes/dataset_filtering/filter_dapo.yaml 0 500
+
+set -x -e
+
+source ~/.bashrc
+source openr1/bin/activate
+
+python scripts/pass_rate_filtering/compute_pass_rate.py --config $1 --dataset_start_index $2 --dataset_end_index $3
\ No newline at end of file
diff --git a/slurm/e2b_router.slurm b/slurm/e2b_router.slurm
new file mode 100644
index 000000000..5f1e2a673
--- /dev/null
+++ b/slurm/e2b_router.slurm
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+#SBATCH --partition=hopper-cpu
+#SBATCH --mem=16g
+#SBATCH --cpus-per-task=16
+#SBATCH --output=/fsx/open-r1/logs/e2b_router/%x-%j.out
+#SBATCH --error=/fsx/open-r1/logs/e2b_router/%x-%j.err
+#SBATCH --requeue
+#SBATCH --time=7-00:00:00
+
+echo "Starting job"
+set -x -e
+
+source ~/.bashrc
+source openr1/bin/activate
+
+srun python scripts/e2b_router.py
\ No newline at end of file
diff --git a/slurm/eval_callback.slurm b/slurm/eval_callback.slurm
deleted file mode 100644
index 093c3d067..000000000
--- a/slurm/eval_callback.slurm
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-#SBATCH --ntasks-per-node=1
-#SBATCH --gres=gpu:8
-#SBATCH --partition=hopper-prod
-#SBATCH --output=./logs/evaluate/%x-%j.out
-#SBATCH --err=./logs/evaluate/%x-%j.err
-#SBATCH --requeue
-
-set -x -e
-source ~/.bashrc
-conda activate openr1
-
-TASK_NAME=$1
-TASKS=$2
-MODEL_ID=$3
-MODEL_REVISION=$4
-# Optional args
-[ -z "$5"] && TENSOR_PARALLEL=False || TENSOR_PARALLEL=$5
-[ -z "$6"] && TRUST_REMOTE_CODE=False || TRUST_REMOTE_CODE=$6
-# $7 is reserved for system_prompt, see line 51
-NUM_GPUS=$(nvidia-smi -L | wc -l)
-
-# Set Whether to use tensor parallelism or data parallelism
-if [ "$TENSOR_PARALLEL" = "True" ]; then
- # use TP to shard model across NUM_GPUS
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
- MODEL_ARGS="pretrained=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
-else
- MODEL_ARGS="pretrained=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
-fi
-
-LM_EVAL_REPO_ID="open-r1/open-r1-eval-leaderboard"
-MODEL_NAME=$(echo $MODEL_ID | sed 's/\//_/g') # replaces / with _
-DETAILS_REPO_ID="open-r1//details-$MODEL_NAME"
-OUTPUT_DIR="eval_results/$MODEL_ID/$MODEL_REVISION/$TASK_NAME"
-# We need this flag since we run this script from training jobs that use DeepSpeed and the env vars get progated which causes errors during evaluation
-ACCELERATE_USE_DEEPSPEED=false
-# Enable fast downloads
-HF_HUB_ENABLE_HF_TRANSFER=1
-
-echo "Running lighteval script ..."
-echo "Eval results will be saved to $OUTPUT_DIR"
-# Check if "custom" is a substring of TASKS
-if [[ $TASKS == *"custom"* ]]; then
- echo "Custom task detected. Running custom task evaluation script ..."
- lighteval vllm $MODEL_ARGS $TASKS \
- --custom-tasks "src/open_r1/evaluate.py" \
- --use-chat-template \
- --output-dir $OUTPUT_DIR \
- --save-details \
- ${7:+--system-prompt "$7"}
-else
- lighteval vllm $MODEL_ARGS $TASKS \
- --use-chat-template \
- --output-dir $OUTPUT_DIR \
- --save-details \
- ${7:+--system-prompt "$7"}
-fi
-
-OUTPUT_FILEPATHS=$(find $OUTPUT_DIR/results/ -type f \( -name "*.json" \))
-for filepath in $OUTPUT_FILEPATHS; do
- echo "Uploading $filepath to Hugging Face Hub..."
- filename=$(basename -- "$filepath")
- huggingface-cli upload --repo-type space --private $LM_EVAL_REPO_ID $filepath $OUTPUT_DIR/$filename
-done
-
-echo "Uploading details to Hugging Face Hub..."
-DETAILS_FILEPATHS=$(find $OUTPUT_DIR/details/ -type f \( -name "*.parquet" \))
-echo "DETAILS_FILEPATHS: $DETAILS_FILEPATHS"
-TIMESTAMP=$(date +"%Y-%m-%dT%H-%M-%S")
-python src/open_r1/utils/upload_details.py --data_files $DETAILS_FILEPATHS --hub_repo_id $DETAILS_REPO_ID --config_name $MODEL_REVISION.$TASK_NAME.$TIMESTAMP
-
-echo "Cleaning up ..."
-rm -rf $OUTPUT_DIR
-
-echo "Done!"
\ No newline at end of file
diff --git a/slurm/evaluate.slurm b/slurm/evaluate.slurm
index 5fe7f8e33..dbfbb33f6 100644
--- a/slurm/evaluate.slurm
+++ b/slurm/evaluate.slurm
@@ -1,55 +1,83 @@
#!/bin/bash
-#SBATCH --job-name=open-r1-evaluate
-#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
-#SBATCH --exclusive
#SBATCH --gres=gpu:8
-#SBATCH --partition=hopper-prod
-#SBATCH --time=01:59:00
-#SBATCH --output=./logs/evaluate/%x-%j.out
-#SBATCH --err=./logs/evaluate/%x-%j.err
+#SBATCH --partition=hopper-prod
+#SBATCH --output=./logs/%x-%j.out
+#SBATCH --error=./logs/%x-%j.err
+#SBATCH --requeue
+#SBATCH --time=1-00:00:00
-# Usage: sbatch slurm/evaluate.slurm deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B aime24
+
+# Specific configuration optimized for the Hugging Face Compute Cluster
+# Be ye warned this may not work on other clusters!
+module load cuda/12.4
+
+# Refresh Weka on h4 cache
+echo "Refreshing Weka filesystem..."
+find -L /fsx/h4/ -type f | xargs -d '\n' -r -n512 -P64 weka fs tier fetch
+
+# Needed for vLLM
+export VLLM_WORKER_MULTIPROC_METHOD=spawn
set -x -e
source ~/.bashrc
-conda activate openr1
-module load cuda/12.1
-echo "START TIME: $(date)"
-echo "PYTHON ENV: $(which python)"
-
-
-NUM_GPUS=8
-MODEL=$1
-TASK=$2
-# Check if a third argument is passed, if it is tp then eval with tensor parallelism. Required for larger models
-if [ -n "$3" ] && [ "$3" == "tp" ]; then
- MODEL_ARGS="pretrained=$MODEL,dtype=float16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
-else
- MODEL_ARGS="pretrained=$MODEL,dtype=float16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilisation=0.8"
-fi
-OUTPUT_DIR=data/evals/$MODEL
+source openr1/bin/activate
+TASK_NAME=$1
+TASKS=$2
+MODEL_ID=$3
+MODEL_REVISION=$4
+# Optional args
+[ -z "$5"] && TENSOR_PARALLEL=False || TENSOR_PARALLEL=$5
+[ -z "$6"] && TRUST_REMOTE_CODE=False || TRUST_REMOTE_CODE=$6
+# $7 is reserved for system_prompt (base64-encoded), see the lighteval call below
+NUM_GPUS=$(nvidia-smi -L | wc -l)
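+
+# Example invocation (illustrative; the optional 7th argument is the base64-encoded system prompt):
+#   sbatch slurm/evaluate.slurm aime24 "lighteval|aime24|0|0" deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B main \
+#     False False "$(echo -n 'Please reason step by step.' | base64 -w0)"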
-# force crashing on nccl issues like hanging broadcast
-export NCCL_ASYNC_ERROR_HANDLING=1
-# export NCCL_DEBUG=INFO
-# export NCCL_DEBUG_SUBSYS=COLL
-# export NCCL_SOCKET_NTHREADS=1
-# export NCCL_NSOCKS_PERTHREAD=1
-# export CUDA_LAUNCH_BLOCKING=1
+# Use TP to shard model across GPUs
+if [ "$TENSOR_PARALLEL" = "True" ]; then
+ MODEL_ARGS="model_name=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,tensor_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+else
+ MODEL_ARGS="model_name=$MODEL_ID,revision=$MODEL_REVISION,trust_remote_code=$TRUST_REMOTE_CODE,dtype=bfloat16,data_parallel_size=$NUM_GPUS,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
+fi
-# Specific configuration optimized for the Hugging Face Compute Cluster
-# Be ye warned this may not work on other clusters!
-module load cuda/12.1
+LM_EVAL_REPO_ID="open-r1/open-r1-eval-leaderboard"
+MODEL_NAME=$(echo $MODEL_ID | sed 's/\//_/g') # replaces / with _
+DETAILS_REPO_ID="open-r1/details-$MODEL_NAME"
+OUTPUT_DIR="eval_results/$MODEL_ID/$MODEL_REVISION/$TASK_NAME"
+# We need this flag since we run this script from training jobs that use DeepSpeed and the env vars get propagated, which causes errors during evaluation
+ACCELERATE_USE_DEEPSPEED=false
-lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
- --custom-tasks src/open_r1/evaluate.py \
+echo "Running lighteval script ..."
+echo "Eval results will be saved to $OUTPUT_DIR"
+lighteval vllm "$MODEL_ARGS" $TASKS \
--use-chat-template \
- --system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
+ --output-dir $OUTPUT_DIR \
--save-details \
- --output-dir $OUTPUT_DIR
+ ${7:+--system-prompt "$(echo "$7" | base64 --decode)"}
+
+OUTPUT_FILEPATHS=$(find $OUTPUT_DIR/results/ -type f \( -name "*.json" \))
+for filepath in $OUTPUT_FILEPATHS; do
+ echo "Uploading $filepath to Hugging Face Hub..."
+ filename=$(basename -- "$filepath")
+ for attempt in {1..20}; do
+ if huggingface-cli upload --repo-type space --private $LM_EVAL_REPO_ID $filepath $OUTPUT_DIR/$filename; then
+ echo "Upload succeeded for $filepath"
+ break
+ else
+ echo "Upload failed for $filepath. Attempt $attempt of 20. Retrying in 5 seconds..."
+ sleep 5
+ fi
+ done
+done
+echo "Uploading details to Hugging Face Hub..."
+DETAILS_FILEPATHS=$(find $OUTPUT_DIR/details/ -type f \( -name "*.parquet" \))
+echo "DETAILS_FILEPATHS: $DETAILS_FILEPATHS"
+TIMESTAMP=$(date +"%Y-%m-%dT%H-%M-%S")
+python scripts/upload_details.py --data_files $DETAILS_FILEPATHS --hub_repo_id $DETAILS_REPO_ID --config_name $MODEL_REVISION.$TASK_NAME.$TIMESTAMP
+
+echo "Cleaning up ..."
+rm -rf $OUTPUT_DIR
-echo "END TIME: $(date)"
+echo "Done!"
diff --git a/slurm/experimental/serve_r1_vllm.slurm b/slurm/experimental/serve_r1_vllm.slurm
new file mode 100644
index 000000000..9f1ffd938
--- /dev/null
+++ b/slurm/experimental/serve_r1_vllm.slurm
@@ -0,0 +1,132 @@
+#!/bin/bash
+#SBATCH --job-name=r1-vllm
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=normal
+#SBATCH --nodes=4
+#SBATCH --gpus-per-node=8
+#SBATCH --exclusive
+#SBATCH --output=./logs/%x_%j_%n.out
+#SBATCH --error=./logs/%x_%j_%n.err
+#SBATCH --time=7-00:00:00
+#SBATCH --ntasks-per-node=1
+
+set -exuo pipefail
+
+MODEL_PATH="deepseek-ai/DeepSeek-R1"
+CONDA_ENV="vllm7"
+SERVER_PORT=8000
+RAY_PORT=6379
+RAY_DASHBOARD_PORT=8265
+
+while getopts "m:e:h" opt; do
+ case $opt in
+ m) MODEL_PATH="$OPTARG" ;;
+ e) CONDA_ENV="$OPTARG" ;;
+ h|?) echo "Usage: sbatch $0 [-m MODEL_PATH] [-e CONDA_ENV]"; exit 1 ;;
+ esac
+done
+
+# Environment setup
+module load cuda/12.1
+source ~/.bashrc
+source "$CONDA_PREFIX/etc/profile.d/conda.sh"
+conda activate "$CONDA_ENV" || { echo "Failed to activate conda env $CONDA_ENV"; exit 1; }
+
+# Get nodes information
+NODES=($(scontrol show hostnames "$SLURM_JOB_NODELIST"))
+HEAD_NODE="${NODES[0]}"
+HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address)
+
+echo "SLURM_JOB_ID: $SLURM_JOB_ID"
+echo "SLURM_JOB_NODELIST: $SLURM_JOB_NODELIST"
+echo "Head node: $HEAD_NODE ($HEAD_NODE_IP)"
+
+# Start Ray head node
+echo "Starting Ray head node at $HEAD_NODE"
+srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" \
+ ray start --head \
+ --node-ip-address="$HEAD_NODE_IP" \
+ --port=$RAY_PORT \
+ --dashboard-host=0.0.0.0 \
+ --dashboard-port=$RAY_DASHBOARD_PORT \
+ --block &
+
+sleep 10
+
+# Start Ray worker nodes
+WORKER_COUNT=$((SLURM_JOB_NUM_NODES - 1))
+for ((i = 1; i <= WORKER_COUNT; i++)); do
+ WORKER_NODE="${NODES[$i]}"
+ echo "Starting Ray worker $i at $WORKER_NODE"
+ srun --nodes=1 --ntasks=1 -w "$WORKER_NODE" \
+ ray start --address "$HEAD_NODE_IP:$RAY_PORT" \
+ --block &
+ sleep 5
+done
+
+echo "Waiting for Ray cluster to initialize..."
+sleep 60
+
+# Start vLLM server
+echo "Starting vLLM server..."
+RAY_ADDRESS="http://$HEAD_NODE_IP:$RAY_DASHBOARD_PORT" ray job submit \
+ --working-dir src/open_r1 \
+ --no-wait \
+ --job-id vllm-server \
+ -- vllm serve "$MODEL_PATH" \
+ --tensor-parallel-size 8 \
+ --pipeline-parallel-size 4 \
+ --gpu-memory-utilization 0.90 \
+ --max-model-len 32768 \
+ --max-num-batched-tokens 262144 \
+ --max-num-seqs 128 \
+ --max-seq-len-to-capture 32768 \
+ --enable-chunked-prefill true \
+ --preemption-mode recompute \
+ --swap-space 128 \
+ --trust-remote-code \
+ --distributed-executor-backend ray
+
+# Wait for server with timeout
+TIMEOUT=3600 # 1h
+START_TIME=$(date +%s)
+echo "Waiting for vLLM server (http://$HEAD_NODE_IP:$SERVER_PORT)..."
+
+while true; do
+ if curl -s -o /dev/null -w "%{http_code}" "http://$HEAD_NODE_IP:$SERVER_PORT/health" >/dev/null 2>&1; then
+ echo "Server is ready at http://$HEAD_NODE_IP:$SERVER_PORT"
+ break
+ fi
+
+ CURRENT_TIME=$(date +%s)
+ if [ $((CURRENT_TIME - START_TIME)) -gt $TIMEOUT ]; then
+ echo "Error: Server failed to start within $TIMEOUT seconds"
+ exit 1
+ fi
+
+ echo "Still waiting... ($(($CURRENT_TIME - $START_TIME)) seconds elapsed)"
+ sleep 60
+done
+
+echo "Checking available models..."
+curl "http://$HEAD_NODE_IP:$SERVER_PORT/v1/models"
+sleep 10
+
+echo "Executing sanity check..."
+curl "http://$HEAD_NODE_IP:$SERVER_PORT/v1/completions" \
+ -H "Content-Type: application/json" \
+ -d "{
+ \"model\": \"default\",
+ \"prompt\": \"<|begin▁of▁sentence|><|User|>hi, how are you?<|Assistant|>\",
+ \"max_tokens\": 2048,
+ \"temperature\": 0.6
+ }"
+
+# Keep the job running with health checks
+while true; do
+ if ! curl -s -o /dev/null "http://$HEAD_NODE_IP:$SERVER_PORT/health"; then
+ echo "Error: Server health check failed"
+ exit 1
+ fi
+ sleep 300
+done
\ No newline at end of file
diff --git a/slurm/generate.slurm b/slurm/generate.slurm
index c154d64af..0935d6245 100644
--- a/slurm/generate.slurm
+++ b/slurm/generate.slurm
@@ -6,7 +6,7 @@
#SBATCH --exclusive
#SBATCH --gpus-per-node=8
#SBATCH --output=./logs/%x-%j.out
-#SBATCH --err=./logs/%x-%j.err
+#SBATCH --error=./logs/%x-%j.err
#SBATCH --time=04-00:00:00
# Parse command line arguments
@@ -122,14 +122,14 @@ echo "-------------------"
set -ex
-module load cuda/12.1
+module load cuda/12.4
export LD_LIBRARY_PATH=.venv/lib/python3.11/site-packages/nvidia/nvjitlink/lib
echo "SLURM_JOB_ID: $SLURM_JOB_ID"
echo "SLURM_JOB_NODELIST: $SLURM_JOB_NODELIST"
-source .venv/bin/activate
+source openr1/bin/activate
# Getting the node names
nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
diff --git a/slurm/grpo.slurm b/slurm/grpo.slurm
deleted file mode 100644
index 4965dde22..000000000
--- a/slurm/grpo.slurm
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=open-r1-grpo
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=1
-#SBATCH --exclusive
-#SBATCH --gres=gpu:8
-#SBATCH --partition=hopper-prod
-#SBATCH --output=./logs/%x-%j.out
-#SBATCH --err=./logs/%x-%j.err
-
-set -x -e
-
-source ~/.bashrc
-conda activate openr1
-echo "START TIME: $(date)"
-echo "PYTHON ENV: $(which python)"
-
-MODEL_PATH=$1
-DATASET_PATH=$2
-ACCELERATOR=$3
-
-# Training setup
-NUM_NODES=$SLURM_NNODES
-GPUS_PER_NODE=8
-WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE))
-
-
-# so processes know who to talk to
-MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
-MASTER_PORT=6000
-
-export CMD=" \
- src/open_r1/grpo.py \
- --model_name_or_path $MODEL_PATH \
- --dataset_name $DATASET_PATH \
- --learning_rate 2.0e-5 \
- --num_train_epochs 1 \
- --max_completion_length 1024 \
- --max_prompt_length 512 \
- --per_device_train_batch_size 4 \
- --per_device_eval_batch_size 4 \
- --gradient_accumulation_steps 4 \
- --gradient_checkpointing \
- --bf16 \
- --use_vllm \
- --vllm_device auto \
- --vllm_gpu_memory_utilization 0.7 \
- --logging_steps 5 \
- --eval_strategy steps \
- --eval_steps 100 \
- --output_dir data/Qwen2.5-1.5B-Open-R1-GRPO
- "
-
-export LAUNCHER="HF_HUB_ENABLE_HF_TRANSFER=1 ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \
- --config_file configs/$ACCELERATOR.yaml \
- --num_processes $(($WORLD_SIZE - 1)) \
- --gradient_accumulation_steps 4 \
- --num_machines $NUM_NODES \
- --main_process_ip $MASTER_ADDR \
- --main_process_port $MASTER_PORT \
- --machine_rank \$SLURM_PROCID \
- --rdzv_conf "rdzv_backend=c10d,rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT" \
- --max_restarts 1 \
- --role \$(hostname -s): \
- --tee 3 \
- "
-
-# force crashing on nccl issues like hanging broadcast
-export NCCL_ASYNC_ERROR_HANDLING=1
-# export NCCL_DEBUG=INFO
-# export NCCL_DEBUG_SUBSYS=COLL
-# export NCCL_SOCKET_NTHREADS=1
-# export NCCL_NSOCKS_PERTHREAD=1
-# export CUDA_LAUNCH_BLOCKING=1
-
-# Specific configuration optimized for the Hugging Face Compute Cluster
-# Be ye warned this may not work on other clusters!
-module load cuda/12.1
-
-# srun error handling:
-# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
-# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
-SRUN_ARGS=" \
- --wait=60 \
- --kill-on-bad-exit=1 \
- "
-
-clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --role \$SLURMD_NODENAME: $CMD" 2>&1
-
-echo "END TIME: $(date)"
diff --git a/slurm/morph_router.slurm b/slurm/morph_router.slurm
new file mode 100644
index 000000000..6e0001b3d
--- /dev/null
+++ b/slurm/morph_router.slurm
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+#SBATCH --partition=hopper-cpu
+#SBATCH --mem=16g
+#SBATCH --cpus-per-task=16
+#SBATCH --output=/fsx/open-r1/logs/morph_router/%x-%j.out
+#SBATCH --error=/fsx/open-r1/logs/morph_router/%x-%j.err
+#SBATCH --requeue
+#SBATCH --time=7-00:00:00
+
+
+echo "Starting job"
+set -x -e
+
+source ~/.bashrc
+source openr1/bin/activate
+
+srun python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
diff --git a/slurm/piston/README.md b/slurm/piston/README.md
new file mode 100644
index 000000000..94699cff5
--- /dev/null
+++ b/slurm/piston/README.md
@@ -0,0 +1,81 @@
+# Piston workers (slurm)
+
+We have built a [piston](https://github.com/engineer-man/piston) package to run IOI problems.
+
+To launch a fleet of piston workers on a slurm cluster, you can adapt the paths in `launch_piston_workers.sh` and `launch_single_piston.sh` and run:
+```bash
+slurm/piston/launch_piston_workers.sh (number of workers to launch)
+```
+
+This command will launch a slurm job for each worker, which will be called `piston-worker-<port>`, where `<port>` is the port where the worker will be listening.
+
+## First time setup
+You will need to install the [IOI package](https://github.com/guipenedo/piston/tree/master/packages/cms_ioi/1.0.0) in the workers.
+1. Launch a single worker:
+```bash
+slurm/piston/launch_piston_workers.sh 1
+```
+
+2. Assuming it's running on `ip-10-53-86-146:1234`, send the package install request:
+
+For IOI:
+```bash
+curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "cms_ioi", "version": "1.0.0"}'
+```
+
+For CodeForces:
+```bash
+curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "codeforces", "version": "1.0.0"}'
+```
+
+3. You can now launch more workers and due to the shared mounted packages directory, they should already have the package installed.
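+
+Optionally, you can sanity-check a worker by listing the runtimes it has installed (the hostname and port below are the example values from step 2); the `cms_ioi` or `codeforces` runtime should appear once the package install has finished:
+```bash
+curl http://ip-10-53-86-146:1234/api/v2/runtimes
+```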
+
+To have the main script find the workers automatically, you can export the following environment variable:
+```bash
+export PISTON_ENDPOINTS=slurm
+```
+Alternatively, you can add `PISTON_ENDPOINTS=slurm` to your .env file.
+
+You can also change `PISTON_MAX_REQUESTS_PER_ENDPOINT`, which limits how many simultaneous requests each worker will handle (1 by default). Keep in mind that this limit is enforced per process: in distributed setups there is no global limit, so workers might occasionally be overwhelmed when several processes hit the same worker.
+
+If you would like to adapt the code to run without piston, please see the [ioi repo](https://github.com/huggingface/ioi).
+For CodeForces, you should implement the [`run`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/run) and [`compile`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/compile) scripts.
+
+# Piston workers (local docker)
+This will launch a single worker in a docker container. Consider launching multiple workers for better scalability. Replace 2000 with the port you want to use.
+Make sure to change `/path/to/local/packages` to the path you want to persist for package installs.
+
+```bash
+docker run -d \
+ --name piston_worker \
+ -v /path/to/local/packages:/piston/packages \
+ -e PORT=2000 \
+ -e PISTON_COMPILE_TIMEOUT=60000 \
+ -e PISTON_RUN_TIMEOUT=60000 \
+ -e PISTON_OUTPUT_MAX_SIZE=1000000000 \
+ -e PISTON_MAX_FILE_SIZE=1000000000 \
+ -e PISTON_DISABLE_NETWORKING=true \
+ -e PISTON_REPO_URL=https://github.com/guipenedo/piston/releases/download/pkgs/index \
+ -p 2000:2000 \
+ --entrypoint /bin/bash \
+ ghcr.io/engineer-man/piston@sha256:63b5654156a89c5a2ad281aface21416615d62ec056d88efe8fcd307ce73575a \
+ -c "sed -i '/app.use(body_parser.urlencoded/c\ app.use(body_parser.urlencoded({ extended: true, limit: \"512mb\" }));' src/index.js && \
+ sed -i '/app.use(body_parser.json/c\ app.use(body_parser.json({ limit: \"512mb\" }));' src/index.js && \
+ node src"
+```
+
+Install the package:
+For IOI:
+```bash
+curl -X POST http://localhost:2000/api/v2/packages -H "Content-Type: application/json" -d '{"language": "cms_ioi", "version": "1.0.0"}'
+```
+
+For CodeForces:
+```bash
+curl -X POST http://localhost:2000/api/v2/packages -H "Content-Type: application/json" -d '{"language": "codeforces", "version": "1.0.0"}'
+```
+
+Remember to set `PISTON_ENDPOINTS`:
+```bash
+export PISTON_ENDPOINTS=http://localhost:2000/api/v2,http://localhost:2001/api/v2,http://localhost:2002/api/v2
+```
diff --git a/slurm/piston/launch_piston_workers.sh b/slurm/piston/launch_piston_workers.sh
new file mode 100755
index 000000000..908efcc2e
--- /dev/null
+++ b/slurm/piston/launch_piston_workers.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# this simple script will launch a bunch of piston workers on the HF science cluster
+
+N_INSTANCES=${1:-5} # Default to 5 instances
+
+for i in $(seq 1 $N_INSTANCES); do
+ # Find random (hopefully) available port
+ PORT=$(comm -23 <(seq 2000 10000 | sort) <(ss -tan | awk '{print $4}' | cut -d':' -f2 | sort -u) | shuf | head -n1)
+
+ # the job name format is important for the code to be able to get a list of workers: `piston-worker-<port>`
+ sbatch \
+ --job-name="piston-worker-$PORT" \
+ --export=ALL,PORT=$PORT \
+ slurm/piston/launch_single_piston.sh
+done
\ No newline at end of file
diff --git a/slurm/piston/launch_single_piston.sh b/slurm/piston/launch_single_piston.sh
new file mode 100755
index 000000000..27bc65bf2
--- /dev/null
+++ b/slurm/piston/launch_single_piston.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#SBATCH --job-name=piston_worker
+#SBATCH --output=/fsx/open-r1/logs/piston/worker-logs/%x-%j.out
+#SBATCH --error=/fsx/open-r1/logs/piston/worker-logs/%x-%j.out # Redirect error logs to .out
+#SBATCH --cpus-per-task=2
+#SBATCH --mem-per-cpu=1950M
+#SBATCH --partition=hopper-cpu
+#SBATCH --time=48:00:00
+
+# sometimes if a bunch of workers start at the same time pyxis dies
+sleep $(( RANDOM % 20 ))
+
+# mounting the packages folder lets us not have to manually install the package on each instance
+# we use 63b5654156a89c5a2ad281aface21416615d62ec056d88efe8fcd307ce73575a as the latest image requires isolate, which does not work on the HF science cluster (cgroups incompatibility)
+# feel free to try the latest image
+# the code you see below increases the very constrained piston default limits, and sets the repo url to the one hosting our IOI package
+srun --container-mounts=/fsx/guilherme/ioi2024/piston_files/packages:/piston/packages --container-image "ghcr.io#engineer-man/piston:sha256:63b5654156a89c5a2ad281aface21416615d62ec056d88efe8fcd307ce73575a" \
+ bash -c "
+ export PISTON_COMPILE_TIMEOUT=60000
+ export PISTON_RUN_TIMEOUT=60000
+ export PISTON_OUTPUT_MAX_SIZE=1000000000
+ export PISTON_MAX_FILE_SIZE=1000000000
+ export PISTON_DISABLE_NETWORKING=true
+ export PISTON_REPO_URL=https://github.com/guipenedo/piston/releases/download/pkgs/index
+
+ sed -i '/app.use(body_parser.urlencoded/c\ app.use(body_parser.urlencoded({ extended: true, limit: \"512mb\" }));' src/index.js
+ sed -i '/app.use(body_parser.json/c\ app.use(body_parser.json({ limit: \"512mb\" }));' src/index.js
+
+ # Start server in background
+ node src
+ "
diff --git a/slurm/serve_r1.slurm b/slurm/serve_r1.slurm
new file mode 100644
index 000000000..6cb3719db
--- /dev/null
+++ b/slurm/serve_r1.slurm
@@ -0,0 +1,109 @@
+#!/bin/bash
+#SBATCH --job-name=r1-server
+#SBATCH --partition=hopper-prod
+#SBATCH --qos=normal
+#SBATCH --nodes=2
+#SBATCH --gpus-per-node=8
+#SBATCH --exclusive
+#SBATCH --output=./logs/%x_%j_%n.out
+#SBATCH --error=./logs/%x_%j_%n.err
+#SBATCH --time=7-00:00:00
+#SBATCH --ntasks-per-node=1
+
+set -exuo pipefail
+
+MODEL_PATH="deepseek-ai/DeepSeek-R1"
+CONDA_ENV="sglang124"
+ROUTER_ADDRESS=""
+SERVER_PORT=39877
+DIST_PORT=45000
+
+# TODO: Adjust these variables to your cluster configuration
+export OUTLINES_CACHE_DIR=/scratch/serve_r1/ocache/
+export TRITON_HOME=/scratch/serve_r1/triton/
+export GLOO_SOCKET_IFNAME="enp71s0"
+export NCCL_SOCKET_IFNAME="enp71s0"
+
+while getopts "m:e:r:h" opt; do
+ case $opt in
+ m) MODEL_PATH="$OPTARG" ;;
+ e) CONDA_ENV="$OPTARG" ;;
+ r) ROUTER_ADDRESS="$OPTARG" ;;
+ h|?) echo "Usage: sbatch $0 [-m MODEL_PATH] [-e CONDA_ENV] [-r ROUTER_ADDRESS]"; exit 1 ;;
+ esac
+done
+
+# TODO: Environment setup, adjust to your cluster configuration
+module load cuda/12.4
+source ~/.bashrc
+source "$CONDA_PREFIX/etc/profile.d/conda.sh"
+conda activate "$CONDA_ENV" || { echo "Failed to activate conda env $CONDA_ENV"; exit 1; }
+
+FIRST_NODE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n1)
+FIRST_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$FIRST_NODE" hostname --ip-address)
+
+# Launch servers synchronously across all nodes
+# (--max-running-requests=56 is a rough estimate to avoid too many evicted/preempted 16k-long requests)
+srun --nodes=2 --ntasks=2 --ntasks-per-node=1 \
+ bash -c "python -m sglang.launch_server \
+ --model-path '$MODEL_PATH' \
+ --tp 16 \
+ --dist-init-addr '$FIRST_NODE_IP:$DIST_PORT' \
+ --nnodes 2 \
+ --node-rank \$SLURM_PROCID \
+ --port '$SERVER_PORT' \
+ --host 0.0.0.0 \
+ --trust-remote-code \
+ --max-running-requests 56 \
+ --context-length 32768" &
+
+# Wait for server with timeout
+TIMEOUT=3600 # 1h, but model loading should take ~30min
+START_TIME=$(date +%s)
+echo "Waiting for SGLang server (http://$FIRST_NODE_IP:$SERVER_PORT)..."
+
+while true; do
+ if curl -s -o /dev/null -w "%{http_code}" "http://$FIRST_NODE_IP:$SERVER_PORT/health" >/dev/null 2>&1; then
+ echo "Server is ready at http://$FIRST_NODE_IP:$SERVER_PORT"
+ break
+ fi
+
+ CURRENT_TIME=$(date +%s)
+ if [ $((CURRENT_TIME - START_TIME)) -gt $TIMEOUT ]; then
+ echo "Error: Server failed to start within $TIMEOUT seconds"
+ exit 1
+ fi
+
+ echo "Still waiting... ($(($CURRENT_TIME - $START_TIME)) seconds elapsed)"
+ sleep 60
+done
+
+# Register with router only if address was provided
+if [ -n "$ROUTER_ADDRESS" ]; then
+ echo "Registering with router at $ROUTER_ADDRESS..."
+ curl -X POST "http://$ROUTER_ADDRESS/add_worker?url=http://$FIRST_NODE_IP:$SERVER_PORT" || true
+ sleep 10
+fi
+
+echo "Checking available models..."
+curl "http://$FIRST_NODE_IP:$SERVER_PORT/v1/models"
+sleep 10
+
+echo "Executing sanity check..."
+curl "http://$FIRST_NODE_IP:$SERVER_PORT/v1/completions" \
+ -H "Content-Type: application/json" \
+ -d "{
+ \"model\": \"default\",
+ \"prompt\": \"<|begin▁of▁sentence|><|User|>hi, how are you?<|Assistant|>\",
+ \"max_tokens\": 2048,
+ \"temperature\": 0.6
+ }"
+
+# Keep the job running with health checks
+while true; do
+ if ! curl -s -o /dev/null "http://$FIRST_NODE_IP:$SERVER_PORT/health"; then
+ echo "Error: Server health check failed"
+ exit 1
+ fi
+ sleep 300
+done
\ No newline at end of file
diff --git a/slurm/serve_router.slurm b/slurm/serve_router.slurm
new file mode 100644
index 000000000..0fe96177f
--- /dev/null
+++ b/slurm/serve_router.slurm
@@ -0,0 +1,45 @@
+#!/bin/bash
+#SBATCH --job-name=r1-router
+#SBATCH --partition=hopper-cpu
+#SBATCH --qos=high
+#SBATCH --nodes=1
+#SBATCH --cpus-per-task=8
+#SBATCH --mem-per-cpu=1875m
+#SBATCH --output=./logs/%x_%j_%n.out
+#SBATCH --error=./logs/%x_%j_%n.err
+#SBATCH --time=30-00:00:00
+#SBATCH --requeue
+
+set -exuo pipefail
+
+# TODO: Adjust these variables to your cluster configuration
+CONDA_ENV="sglang124"
+ROUTER_PORT=39876
+
+trap 'scontrol requeue ${SLURM_JOB_ID}; exit 15' SIGUSR1
+
+while getopts "e:h" opt; do
+ case $opt in
+ e) CONDA_ENV="$OPTARG" ;;
+ h|?) echo "Usage: sbatch $0 [-e CONDA_ENV]"; exit 1 ;;
+ esac
+done
+
+# TODO: Environment setup, adjust to your cluster configuration
+source ~/.bashrc
+source "$CONDA_PREFIX/etc/profile.d/conda.sh"
+conda activate "$CONDA_ENV" || { echo "Failed to activate conda env $CONDA_ENV"; exit 1; }
+
+python -m sglang_router.launch_router \
+ --port "$ROUTER_PORT" \
+ --host 0.0.0.0 \
+ --worker-startup-timeout-secs 300
+
+# Keep the job running with health checks
+while true; do
+ if ! curl -s -o /dev/null "http://localhost:$ROUTER_PORT/health"; then
+ echo "Error: Router health check failed"
+ exit 1
+ fi
+ sleep 300
+done
\ No newline at end of file
diff --git a/slurm/sft.slurm b/slurm/sft.slurm
deleted file mode 100644
index 60c1949ae..000000000
--- a/slurm/sft.slurm
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=open-r1-sft
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=1
-#SBATCH --exclusive
-#SBATCH --gres=gpu:8
-#SBATCH --partition=hopper-prod
-#SBATCH --output=./logs/%x-%j.out
-#SBATCH --err=./logs/%x-%j.err
-
-set -x -e
-
-source ~/.bashrc
-conda activate openr1
-echo "START TIME: $(date)"
-echo "PYTHON ENV: $(which python)"
-
-MODEL_PATH=$1
-DATASET_PATH=$2
-ACCELERATOR=$3
-
-# Training setup
-NUM_NODES=$SLURM_NNODES
-GPUS_PER_NODE=8
-WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE))
-
-# so processes know who to talk to
-MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
-MASTER_PORT=6000
-
-export CMD=" \
- src/open_r1/sft.py \
- --model_name_or_path $MODEL_PATH \
- --dataset_name $DATASET_PATH \
- --use_liger_kernel true \
- --learning_rate 2.0e-5 \
- --num_train_epochs 1 \
- --packing \
- --max_seq_length 4096 \
- --per_device_train_batch_size 4 \
- --per_device_eval_batch_size 4 \
- --gradient_accumulation_steps 4 \
- --gradient_checkpointing \
- --bf16 \
- --logging_steps 5 \
- --eval_strategy steps \
- --eval_steps 100 \
- --output_dir data/Qwen2.5-1.5B-Open-R1-Distill
- "
-
-export LAUNCHER="HF_HUB_ENABLE_HF_TRANSFER=1 ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \
- --config_file configs/$ACCELERATOR.yaml \
- --gradient_accumulation_steps 4 \
- --num_machines $NUM_NODES \
- --num_processes $WORLD_SIZE \
- --main_process_ip $MASTER_ADDR \
- --main_process_port $MASTER_PORT \
- --machine_rank \$SLURM_PROCID \
- --rdzv_conf "rdzv_backend=c10d,rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT" \
- --max_restarts 1 \
- --role \$(hostname -s): \
- --tee 3 \
- "
-
-# force crashing on nccl issues like hanging broadcast
-export NCCL_ASYNC_ERROR_HANDLING=1
-# export NCCL_DEBUG=INFO
-# export NCCL_DEBUG_SUBSYS=COLL
-# export NCCL_SOCKET_NTHREADS=1
-# export NCCL_NSOCKS_PERTHREAD=1
-# export CUDA_LAUNCH_BLOCKING=1
-
-# Specific configuration optimized for the Hugging Face Compute Cluster
-# Be ye warned this may not work on other clusters!
-module load cuda/12.1
-
-# srun error handling:
-# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
-# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
-SRUN_ARGS=" \
- --wait=60 \
- --kill-on-bad-exit=1 \
- "
-
-clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --role \$SLURMD_NODENAME: $CMD" 2>&1
-
-echo "END TIME: $(date)"
diff --git a/slurm/train.slurm b/slurm/train.slurm
new file mode 100644
index 000000000..15a70d62c
--- /dev/null
+++ b/slurm/train.slurm
@@ -0,0 +1,182 @@
+#!/bin/bash
+#SBATCH --job-name=open_r1
+#SBATCH --ntasks-per-node=1
+#SBATCH --exclusive
+#SBATCH --gres=gpu:8
+#SBATCH --partition=hopper-prod # Adjust this for your cluster
+#SBATCH --output=./logs/%x-%j.out
+#SBATCH --error=./logs/%x-%j.err
+#SBATCH --requeue
+#SBATCH --time=3-00:00:00
+
+
+if [[ "$*" == *"--help"* ]]; then
+ echo "Usage: sbatch slurm/train.slurm [options]"
+ echo "Options:"
+ echo " --model MODEL Model name"
+ echo " --task TASK Task name (e.g. sft, grpo)"
+ echo " --config SUFFIX Configuration suffix (e.g. demo, v00.00)"
+ echo " --accelerator CONFIG Accelerator configuration name (e.g. zero3)"
+ echo " --dp N Data parallelism for vLLM server (default: 1)"
+ echo " --tp N Tensor parallelism for vLLM server (default: 1)"
+ echo " --args \"ARGS\" Optional arguments to pass to the training script"
+ exit 0
+fi
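+
+# Example invocations (illustrative; adjust the recipe and accelerator names to the files under recipes/):
+#   sbatch --nodes=1 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task sft --config demo --accelerator zero3
+#   sbatch --nodes=2 slurm/train.slurm --model Qwen2.5-1.5B-Instruct --task grpo --config demo --accelerator zero2 --dp 4 --tp 2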
+
+# Specific configuration optimized for the Hugging Face Compute Cluster
+module load cuda/12.4
+set -x -e
+
+source ~/.bashrc
+source openr1/bin/activate
+START_TIME=$(date +%s)
+echo "START TIME: $(date)"
+
+# Refresh Weka on h4 cache
+echo "Refreshing Weka filesystem..."
+find -L /fsx/h4/ -type f | xargs -d '\n' -r -n512 -P64 weka fs tier fetch
+
+# Default values
+MODEL=""
+TASK=""
+CONFIG_SUFFIX=""
+ACCELERATOR=""
+DP=1
+TP=1
+OPTIONAL_ARGS=""
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --model)
+ MODEL="$2"
+ shift 2
+ ;;
+ --task)
+ TASK="$2"
+ shift 2
+ ;;
+ --config)
+ CONFIG_SUFFIX="$2"
+ shift 2
+ ;;
+ --accelerator)
+ ACCELERATOR="$2"
+ shift 2
+ ;;
+ --dp)
+ DP="$2"
+ shift 2
+ ;;
+ --tp)
+ TP="$2"
+ shift 2
+ ;;
+ --args)
+ OPTIONAL_ARGS="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown option: $1"
+ echo "Use --help for usage information"
+ exit 1
+ ;;
+ esac
+done
+
+# Validate required arguments
+if [[ -z "$MODEL" || -z "$TASK" || -z "$CONFIG_SUFFIX" || -z "$ACCELERATOR" ]]; then
+ echo "Error: Missing required arguments"
+ echo "Run with --help for usage information"
+ exit 1
+fi
+
+
+CONFIG_FILE=recipes/$MODEL/$TASK/config_$CONFIG_SUFFIX.yaml
+GRAD_ACC_STEPS=$(grep 'gradient_accumulation_steps' $CONFIG_FILE | awk '{print $2}')
+
+# Split the string into individual arguments
+IFS=' ' read -ra ARGS <<< "$OPTIONAL_ARGS"
+# Loop through the arguments and find the one with "--gradient_accumulation_steps"
+for arg in "${ARGS[@]}"; do
+ if [[ "$arg" == "--gradient_accumulation_steps="* ]]; then
+ # Extract the value after the equals sign
+ GRAD_ACC_STEPS="${arg#*=}"
+ break # Exit the loop once we find the desired argument
+ fi
+done
+
+echo "Gradient accumulation steps: $GRAD_ACC_STEPS"
+
+MODEL=$(grep 'model_name_or_path:' $CONFIG_FILE | awk '{print $2}')
+REVISION=$(grep 'model_revision:' $CONFIG_FILE | head -n 1 | awk '{print $2}')
+
+# Distributed configuration
+NUM_NODES=$SLURM_NNODES
+GPUS_PER_NODE=8
+WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE))
+NODELIST=($(scontrol show hostnames $SLURM_JOB_NODELIST))
+MASTER_ADDR=${NODELIST[0]} # First node for main process
+MASTER_PORT=6000
+TRAIN_NODES=("${NODELIST[@]}")
+
+USE_VLLM="false"
+if [[ -f "$CONFIG_FILE" ]] && grep -qE '^\s*use_vllm:\s*true' "$CONFIG_FILE"; then
+ USE_VLLM="true"
+fi
+# if using vllm
+if [[ "$USE_VLLM" == "true" ]]; then
+ TRAIN_NODES=("${NODELIST[@]:0:$((NUM_NODES - 1))}")
+ VLLM_NODE=${NODELIST[-1]} # Last node
+ WORLD_SIZE=$((WORLD_SIZE - GPUS_PER_NODE))
+ NUM_NODES=$((NUM_NODES - 1))
+ srun --nodes=1 --ntasks=1 --nodelist=$VLLM_NODE trl vllm-serve --model $MODEL --revision $REVISION --tensor_parallel_size $TP --data_parallel_size $DP &
+
+ OPTIONAL_ARGS="$OPTIONAL_ARGS --vllm_server_host=$VLLM_NODE"
+fi
+
+# force crashing on nccl issues like hanging broadcast
+export NCCL_ASYNC_ERROR_HANDLING=1
+# export NCCL_DEBUG=INFO
+# export NCCL_DEBUG_SUBSYS=COLL
+# export NCCL_SOCKET_NTHREADS=1
+# export NCCL_NSOCKS_PERTHREAD=1
+# export CUDA_LAUNCH_BLOCKING=1
+
+export CMD=" \
+ src/open_r1/$TASK.py --config $CONFIG_FILE $OPTIONAL_ARGS
+ "
+
+export LAUNCHER="ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \
+ --config_file recipes/accelerate_configs/$ACCELERATOR.yaml \
+ --gradient_accumulation_steps $GRAD_ACC_STEPS \
+ --num_machines $NUM_NODES \
+ --num_processes $WORLD_SIZE \
+ --main_process_ip $MASTER_ADDR \
+ --main_process_port $MASTER_PORT \
+ --machine_rank $SLURM_PROCID \
+ --rdzv_backend=c10d \
+ --max_restarts 1 \
+ --tee 3 \
+ "
+# srun error handling:
+# --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
+# --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
+NODELIST=$(IFS=,; echo "${TRAIN_NODES[*]}")
+
+SRUN_ARGS=" \
+ --wait=60 \
+ --kill-on-bad-exit=1 \
+ --nodes=$NUM_NODES \
+ --ntasks=$NUM_NODES \
+ --nodelist=$NODELIST
+ "
+srun $SRUN_ARGS bash -c "$LAUNCHER $CMD" 2>&1
+
+END_TIME=$(date +%s)
+echo "END TIME: $(date)"
+ELAPSED_SECONDS=$((END_TIME - START_TIME))
+HOURS=$((ELAPSED_SECONDS / 3600))
+MINUTES=$(( (ELAPSED_SECONDS % 3600) / 60 ))
+SECONDS=$((ELAPSED_SECONDS % 60))
+echo "TOTAL JOB TIME: ${HOURS}h ${MINUTES}m ${SECONDS}s (${ELAPSED_SECONDS} seconds)"
diff --git a/src/open_r1/configs.py b/src/open_r1/configs.py
index 8aa12f954..ddb6e53b0 100644
--- a/src/open_r1/configs.py
+++ b/src/open_r1/configs.py
@@ -14,11 +14,112 @@
# limitations under the License.
from dataclasses import dataclass, field
-from typing import Optional
+from typing import Any, Literal, Optional
import trl
+@dataclass
+class DatasetConfig:
+ """Configuration for a dataset in a mixture."""
+
+ id: str
+ config: Optional[str] = None
+ split: str = "train"
+ columns: Optional[list[str]] = None
+ weight: Optional[float] = None
+
+
+@dataclass
+class DatasetMixtureConfig:
+ """Configuration for a mixture of datasets."""
+
+ datasets: list[DatasetConfig]
+ seed: int = 0
+ test_split_size: Optional[float] = None
+
+
+@dataclass
+class ScriptArguments(trl.ScriptArguments):
+ """
+ Extended version of ScriptArguments with support for dataset mixtures.
+
+ Args:
+ dataset_mixture (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
+ Configuration for creating dataset mixtures with advanced options.
+ Format:
+ dataset_mixture:
+ datasets:
+ - id: dataset_id1
+ config: config_name
+ columns:
+ - col1
+ - col2
+ weight: 0.5
+ - id: dataset_id2
+ config: config_name
+ columns:
+ - col1
+ - col2
+ weight: 0.5
+ seed: 42
+ test_split_size: 0.1
+ """
+
+ # Override the dataset_name to make it optional
+ dataset_name: Optional[str] = field(
+ default=None, metadata={"help": "Dataset name. Can be omitted if using dataset_mixture."}
+ )
+ dataset_mixture: Optional[dict[str, Any]] = field(
+ default=None,
+ metadata={"help": "Configuration for creating dataset mixtures with advanced options like shuffling."},
+ )
+
+ def __post_init__(self):
+ if self.dataset_name is None and self.dataset_mixture is None:
+ raise ValueError("Either `dataset_name` or `dataset_mixture` must be provided")
+
+ if self.dataset_mixture is not None:
+ if not isinstance(self.dataset_mixture, dict) or "datasets" not in self.dataset_mixture:
+ raise ValueError(
+ "dataset_mixture must be a dictionary with a 'datasets' key. "
+ "Expected format: {'datasets': [...], 'seed': int}"
+ )
+
+ datasets_list = []
+ datasets_data = self.dataset_mixture.get("datasets", [])
+
+ if isinstance(datasets_data, list):
+ for dataset_config in datasets_data:
+ datasets_list.append(
+ DatasetConfig(
+ id=dataset_config.get("id"),
+ config=dataset_config.get("config"),
+ split=dataset_config.get("split", "train"),
+ columns=dataset_config.get("columns"),
+ weight=dataset_config.get("weight", 1.0),
+ )
+ )
+ else:
+ raise ValueError("'datasets' must be a list of dataset configurations")
+
+ self.dataset_mixture = DatasetMixtureConfig(
+ datasets=datasets_list,
+ seed=self.dataset_mixture.get("seed", 0),
+ test_split_size=self.dataset_mixture.get("test_split_size", None),
+ )
+
+ # Check that column names are consistent across all dataset configs
+ columns_sets = [set(dataset.columns) for dataset in datasets_list if dataset.columns is not None]
+ if columns_sets:
+ first_columns = columns_sets[0]
+ if not all(columns == first_columns for columns in columns_sets):
+ raise ValueError(
+ "Column names must be consistent across all dataset configurations in a mixture. "
+ f"Found different column sets: {[list(cols) for cols in columns_sets]}"
+ )
+
+
# TODO: add the shared options with a mixin to reduce code duplication
@dataclass
class GRPOConfig(trl.GRPOConfig):
@@ -28,21 +129,41 @@ class GRPOConfig(trl.GRPOConfig):
benchmarks: list[str] = field(
default_factory=lambda: [],
- metadata={"help": ("The benchmarks to run after training.")},
+ metadata={"help": "The benchmarks to run after training."},
)
callbacks: list[str] = field(
- default_factory=lambda: [], metadata={"help": ("The callbacks to run during training.")}
+ default_factory=lambda: [],
+ metadata={"help": "The callbacks to run during training."},
+ )
+ chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."})
+ hub_model_revision: Optional[str] = field(
+ default="main", metadata={"help": "The Hub model branch to push the model to."}
)
+ num_completions_to_print: int = field(default=0, metadata={"help": "Number of completions to print."})
+ overwrite_hub_revision: bool = field(default=False, metadata={"help": "Whether to overwrite the Hub revision."})
+ push_to_hub_revision: bool = field(default=False, metadata={"help": "Whether to push to a Hub revision/branch."})
system_prompt: Optional[str] = field(
default=None,
- metadata={"help": ("The optional system prompt to use for benchmarking.")},
+ metadata={"help": "The optional system prompt to use."},
)
- hub_model_revision: Optional[str] = field(
- default="main",
- metadata={"help": ("The Hub model branch to push the model to.")},
+ wandb_log_unique_prompts: bool = field(
+ default=True,
+ metadata={
+ "help": ("Whether to log the unique prompts to wandb. This will create a new run for each unique prompt.")
+ },
+ )
+ wandb_entity: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The entity to store runs under.")},
+ )
+ wandb_project: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The project to store runs under.")},
+ )
+ wandb_run_group: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The group to store runs under.")},
)
- overwrite_hub_revision: bool = field(default=False, metadata={"help": ("Whether to overwrite the Hub revision.")})
- push_to_hub_revision: bool = field(default=False, metadata={"help": ("Whether to push to a Hub revision/branch.")})
@dataclass
@@ -53,18 +174,158 @@ class SFTConfig(trl.SFTConfig):
benchmarks: list[str] = field(
default_factory=lambda: [],
- metadata={"help": ("The benchmarks to run after training.")},
+ metadata={"help": "The benchmarks to run after training."},
)
callbacks: list[str] = field(
- default_factory=lambda: [], metadata={"help": ("The callbacks to run during training.")}
+ default_factory=lambda: [],
+ metadata={"help": "The callbacks to run during training."},
)
+ chat_template: Optional[str] = field(default=None, metadata={"help": "The chat template to use."})
system_prompt: Optional[str] = field(
default=None,
- metadata={"help": ("The optional system prompt to use for benchmarking.")},
+ metadata={"help": "The optional system prompt to use for benchmarking."},
)
hub_model_revision: Optional[str] = field(
default="main",
- metadata={"help": ("The Hub model branch to push the model to.")},
+ metadata={"help": "The Hub model branch to push the model to."},
+ )
+ overwrite_hub_revision: bool = field(default=False, metadata={"help": "Whether to overwrite the Hub revision."})
+ push_to_hub_revision: bool = field(default=False, metadata={"help": "Whether to push to a Hub revision/branch."})
+ wandb_entity: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The entity to store runs under.")},
+ )
+ wandb_project: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The project to store runs under.")},
+ )
+ wandb_run_group: Optional[str] = field(
+ default=None,
+ metadata={"help": ("The group to store runs under.")},
+ )
+
+
+@dataclass
+class GRPOScriptArguments(ScriptArguments):
+ """
+ Script arguments for the GRPO training script.
+
+ Args:
+ reward_funcs (`list[str]`):
+ List of reward functions. Possible values: 'accuracy', 'format', 'reasoning_steps', 'cosine', 'repetition_penalty', 'length', 'tag_count', 'code', 'ioi_code', 'code_format', 'soft_overlong_punishment'.
+ cosine_min_value_wrong (`float`):
+ Minimum reward for cosine scaling for wrong answers.
+ cosine_max_value_wrong (`float`):
+ Maximum reward for cosine scaling for wrong answers.
+ cosine_min_value_correct (`float`):
+ Minimum reward for cosine scaling for correct answers.
+ cosine_max_value_correct (`float`):
+ Maximum reward for cosine scaling for correct answers.
+ cosine_max_len (`int`):
+ Maximum length for cosine scaling.
+ code_language (`str`):
+ Language for code format reward.
+ max_completion_len (`int`):
+ Maximum number of tokens in completion.
+ soft_punish_cache (`int`):
+ Minimum number of tokens in completion.
+ """
+
+ reward_funcs: list[str] = field(
+ default_factory=lambda: ["accuracy", "format", "tag_count"],
+ metadata={
+ "help": "List of reward functions. Possible values: 'accuracy', 'format', 'reasoning_steps', 'cosine', 'repetition_penalty', 'length', tag_count', 'code', 'code_format'"
+ },
+ )
+ cosine_min_value_wrong: float = field(
+ default=0.0,
+ metadata={"help": "Minimum reward for wrong answers"},
+ )
+ cosine_max_value_wrong: float = field(
+ default=-0.5,
+ metadata={"help": "Maximum reward for wrong answers"},
+ )
+ cosine_min_value_correct: float = field(
+ default=0.5,
+ metadata={"help": "Minimum reward for correct answers"},
+ )
+ cosine_max_value_correct: float = field(
+ default=1.0,
+ metadata={"help": "Maximum reward for correct answers"},
+ )
+ cosine_max_len: int = field(
+ default=1000,
+ metadata={"help": "Maximum length for scaling"},
+ )
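+    # Note (illustrative, assuming a standard cosine schedule): the reward is expected to follow
+    #   value(gen_len) = a + 0.5 * (b - a) * (1 + cos(pi * gen_len / cosine_max_len))
+    # so it moves smoothly from b at length 0 to a at cosine_max_len, with (a, b) chosen from the
+    # wrong/correct bounds above depending on answer correctness.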
+ repetition_n_grams: int = field(
+ default=3,
+ metadata={"help": "Number of n-grams for repetition penalty reward"},
+ )
+ repetition_max_penalty: float = field(
+ default=-1.0,
+ metadata={"help": "Maximum (negative) penalty for for repetition penalty reward"},
+ )
+ code_language: str = field(
+ default="python",
+ # '(?:python|cpp)'
+ metadata={
+ "help": "Language for code format reward. Based on E2B supported languages https://e2b.dev/docs/code-interpreting/supported-languages",
+ "choices": ["python", "javascript", "r", "java", "bash", "cpp"],
+ },
+ )
+ code_eval_test_batch_size: int = field(
+ default=1,
+ metadata={
+ "help": "for each generation, evaluate these many test cases in parallel, then check if any of them failed (0 score): if so stop evaluating; otherwise continue with the next batch of test cases. Useful to avoid overloading the eval server + save time on wrong solutions"
+ },
+ )
+ code_eval_scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = field(
+ default="weighted_sum",
+ metadata={"help": "use fraction of passed test cases as reward. If false, use 0/1 scoring."},
+ )
+ parallel_code_exec_per_proc: int = field(
+ default=2,
+ metadata={
+ "help": "Number of parallel E2B code executions per process. Default of 2 is suitable for the Free Hobby tier of E2B with 8 GPUs used for training."
+ },
+ )
+
+ dataset_prompt_column: str = field(
+ default="prompt",
+ metadata={"help": "Column to use as prompts for training."},
+ )
+
+ e2b_router_url: Optional[str] = field(
+ default=None,
+ metadata={"help": "URL for the E2B router. See scripts/e2b_router.py"},
+ )
+
+ morph_router_url: Optional[str] = field(
+ default=None,
+ metadata={"help": "URL for the MorphCloud router. See scripts/morph_router.py"},
+ )
+
+ code_provider: Optional[str] = field(
+ default="e2b",
+ metadata={
+ "help": "Provider for code execution. Options: 'e2b', 'local', 'morph'.",
+ "choices": ["e2b", "local", "morph"],
+ },
+ )
+
+ ioi_provider: Optional[str] = field(
+ default="piston",
+ metadata={
+ "help": "Provider for IOI code execution. Options: 'piston', 'morph'.",
+ "choices": ["piston", "morph"],
+ },
+ )
+
+ max_completion_len: int = field(
+ default=16384,
+ metadata={"help": "Maximum number of characters in completion."},
+ )
+ soft_punish_cache: int = field(
+ default=4096,
+ metadata={"help": "Minimum number of characters in completion."},
)
- overwrite_hub_revision: bool = field(default=False, metadata={"help": ("Whether to overwrite the Hub revision.")})
- push_to_hub_revision: bool = field(default=False, metadata={"help": ("Whether to push to a Hub revision/branch.")})
diff --git a/src/open_r1/evaluate.py b/src/open_r1/evaluate.py
deleted file mode 100644
index c800a889a..000000000
--- a/src/open_r1/evaluate.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2025 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Custom evaluation tasks for LightEval."""
-
-from lighteval.metrics.dynamic_metrics import (
- ExprExtractionConfig,
- LatexExtractionConfig,
- multilingual_extractive_match_metric,
-)
-from lighteval.tasks.lighteval_task import LightevalTaskConfig
-from lighteval.tasks.requests import Doc
-from lighteval.utils.language import Language
-
-
-latex_gold_metric = multilingual_extractive_match_metric(
- language=Language.ENGLISH,
- fallback_mode="first_match",
- precision=5,
- gold_extraction_target=(LatexExtractionConfig(),),
- # Match boxed first before trying other regexes
- pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)),
- aggregation_function=max,
-)
-
-expr_gold_metric = multilingual_extractive_match_metric(
- language=Language.ENGLISH,
- fallback_mode="first_match",
- precision=5,
- gold_extraction_target=(ExprExtractionConfig(),),
- # Match boxed first before trying other regexes
- pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)),
- aggregation_function=max,
-)
-
-
-def prompt_fn(line, task_name: str = None):
- """Assumes the model is either prompted to emit \\boxed{answer} or does so automatically"""
- return Doc(
- task_name=task_name,
- query=line["problem"],
- choices=[line["solution"]],
- gold_index=0,
- )
-
-
-def aime_prompt_fn(line, task_name: str = None):
- return Doc(
- task_name=task_name,
- query=line["problem"],
- choices=[line["answer"]],
- gold_index=0,
- )
-
-
-# Define tasks
-aime24 = LightevalTaskConfig(
- name="aime24",
- suite=["custom"],
- prompt_function=aime_prompt_fn,
- hf_repo="HuggingFaceH4/aime_2024",
- hf_subset="default",
- hf_avail_splits=["train"],
- evaluation_splits=["train"],
- few_shots_split=None,
- few_shots_select=None,
- generation_size=32768,
- metric=[expr_gold_metric],
- version=1,
-)
-math_500 = LightevalTaskConfig(
- name="math_500",
- suite=["custom"],
- prompt_function=prompt_fn,
- hf_repo="HuggingFaceH4/MATH-500",
- hf_subset="default",
- hf_avail_splits=["test"],
- evaluation_splits=["test"],
- few_shots_split=None,
- few_shots_select=None,
- generation_size=32768,
- metric=[latex_gold_metric],
- version=1,
-)
-
-# Add tasks to the table
-TASKS_TABLE = []
-TASKS_TABLE.append(aime24)
-TASKS_TABLE.append(math_500)
-
-# MODULE LOGIC
-if __name__ == "__main__":
- print([t["name"] for t in TASKS_TABLE])
- print(len(TASKS_TABLE))
diff --git a/src/open_r1/generate.py b/src/open_r1/generate.py
index 40ff3b39f..564dca071 100644
--- a/src/open_r1/generate.py
+++ b/src/open_r1/generate.py
@@ -53,7 +53,7 @@ def build_distilabel_pipeline(
generation_kwargs=generation_kwargs,
),
template=prompt_template,
- input_mappings={"instruction": prompt_column} if prompt_column is not None else {},
+ input_mappings=({"instruction": prompt_column} if prompt_column is not None else {}),
input_batch_size=input_batch_size,
num_generations=num_generations,
group_generations=True,
diff --git a/src/open_r1/grpo.py b/src/open_r1/grpo.py
index 24d31a434..44865b848 100644
--- a/src/open_r1/grpo.py
+++ b/src/open_r1/grpo.py
@@ -12,132 +12,167 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import re
-from dataclasses import dataclass, field
-
-from datasets import load_dataset
-
-from latex2sympy2_extended import NormalizationConfig
-from math_verify import LatexExtractionConfig, parse, verify
-from open_r1.configs import GRPOConfig
+import logging
+import os
+import sys
+
+import datasets
+import transformers
+from transformers import set_seed
+from transformers.trainer_utils import get_last_checkpoint
+
+from open_r1.configs import GRPOConfig, GRPOScriptArguments
+from open_r1.rewards import get_reward_funcs
+from open_r1.utils import get_dataset, get_model, get_tokenizer
from open_r1.utils.callbacks import get_callbacks
-from trl import GRPOTrainer, ModelConfig, ScriptArguments, TrlParser, get_peft_config
+from open_r1.utils.wandb_logging import init_wandb_training
+from trl import GRPOTrainer, ModelConfig, TrlParser, get_peft_config
-@dataclass
-class GRPOScriptArguments(ScriptArguments):
- """
- Script arguments for the GRPO training script.
+logger = logging.getLogger(__name__)
- Args:
- reward_funcs (`list[str]`):
- List of reward functions. Possible values: 'accuracy', 'format'.
- """
- reward_funcs: list[str] = field(
- default_factory=lambda: ["accuracy", "format"],
- metadata={"help": "List of reward functions. Possible values: 'accuracy', 'format'"},
+def main(script_args, training_args, model_args):
+ # Set seed for reproducibility
+ set_seed(training_args.seed)
+
+ ###############
+ # Setup logging
+ ###############
+ logging.basicConfig(
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ handlers=[logging.StreamHandler(sys.stdout)],
+ )
+ log_level = training_args.get_process_log_level()
+ logger.setLevel(log_level)
+ datasets.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.enable_default_handler()
+ transformers.utils.logging.enable_explicit_format()
+
+ # Log on each process a small summary
+ logger.warning(
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
+ logger.info(f"Model parameters {model_args}")
+ logger.info(f"Script parameters {script_args}")
+ logger.info(f"Training parameters {training_args}")
+ # Check for last checkpoint
+ last_checkpoint = None
+ if os.path.isdir(training_args.output_dir):
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
+ if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+ logger.info(f"Checkpoint detected, resuming training at {last_checkpoint=}.")
-def accuracy_reward(completions, solution, **kwargs):
- """Reward function that checks if the completion is the same as the ground truth."""
- contents = [completion[0]["content"] for completion in completions]
- rewards = []
- for content, sol in zip(contents, solution):
- gold_parsed = parse(sol, extraction_mode="first_match", extraction_config=[LatexExtractionConfig()])
- if len(gold_parsed) != 0:
- # We require the answer to be provided in correct latex (no malformed operators)
- answer_parsed = parse(
- content,
- extraction_config=[
- LatexExtractionConfig(
- normalization_config=NormalizationConfig(
- nits=False,
- malformed_operators=False,
- basic_latex=True,
- equations=True,
- boxed=True,
- units=True,
- ),
- # Ensures that boxed is tried first
- boxed_match_priority=0,
- try_extract_without_anchor=False,
- )
- ],
- extraction_mode="first_match",
- )
- # Reward 1 if the content is the same as the ground truth, 0 otherwise
- reward = float(verify(answer_parsed, gold_parsed))
- else:
- # If the gold solution is not parseable, we reward 1 to skip this example
- reward = 1.0
- print("Failed to parse gold solution: ", sol)
- rewards.append(reward)
-
- return rewards
-
-
-def format_reward(completions, **kwargs):
- """Reward function that checks if the completion has a specific format."""
- pattern = r"^<think>.*?</think><answer>.*?</answer>$"
- completion_contents = [completion[0]["content"] for completion in completions]
- matches = [re.match(pattern, content) for content in completion_contents]
- return [1.0 if match else 0.0 for match in matches]
-
-
-reward_funcs_registry = {
- "accuracy": accuracy_reward,
- "format": format_reward,
-}
-
-SYSTEM_PROMPT = (
- "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant "
- "first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning "
- "process and answer are enclosed within and tags, respectively, i.e., "
- " reasoning process here answer here "
-)
+ if "wandb" in training_args.report_to:
+ init_wandb_training(training_args)
+ # Load the dataset
+ dataset = get_dataset(script_args)
-def main(script_args, training_args, model_args):
- # Get reward functions
- reward_funcs = [reward_funcs_registry[func] for func in script_args.reward_funcs]
+ ################
+ # Load tokenizer
+ ################
+ tokenizer = get_tokenizer(model_args, training_args)
- # Load the dataset
- dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
+ ##############
+ # Load model #
+ ##############
+ logger.info("*** Loading model ***")
+ model = get_model(model_args, training_args)
+
+ # Get reward functions from the registry
+ reward_funcs = get_reward_funcs(script_args)
# Format into conversation
- def make_conversation(example):
- return {
- "prompt": [
- {"role": "system", "content": SYSTEM_PROMPT},
- {"role": "user", "content": example["problem"]},
- ],
- }
+ def make_conversation(example, prompt_column: str = script_args.dataset_prompt_column):
+ prompt = []
+
+ if training_args.system_prompt is not None:
+ prompt.append({"role": "system", "content": training_args.system_prompt})
+
+ if prompt_column not in example:
+ raise ValueError(f"Dataset Question Field Error: {prompt_column} is not supported.")
+
+ prompt.append({"role": "user", "content": example[prompt_column]})
+ return {"prompt": prompt}
dataset = dataset.map(make_conversation)
+
for split in dataset:
if "messages" in dataset[split].column_names:
dataset[split] = dataset[split].remove_columns("messages")
+ #############################
# Initialize the GRPO trainer
+ #############################
trainer = GRPOTrainer(
- model=model_args.model_name_or_path,
+ model=model,
reward_funcs=reward_funcs,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
- eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
+ eval_dataset=(dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None),
peft_config=get_peft_config(model_args),
callbacks=get_callbacks(training_args, model_args),
+ processing_class=tokenizer,
)
- # Train and push the model to the Hub
- trainer.train()
-
- # Save and push to hub
+ ###############
+ # Training loop
+ ###############
+ logger.info("*** Train ***")
+ checkpoint = None
+ if training_args.resume_from_checkpoint is not None:
+ checkpoint = training_args.resume_from_checkpoint
+ elif last_checkpoint is not None:
+ checkpoint = last_checkpoint
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
+ metrics = train_result.metrics
+ metrics["train_samples"] = len(dataset[script_args.dataset_train_split])
+ trainer.log_metrics("train", metrics)
+ trainer.save_metrics("train", metrics)
+ trainer.save_state()
+
+ ##################################
+ # Save model and create model card
+ ##################################
+ logger.info("*** Save model ***")
+ # Align the model's generation config with the tokenizer's eos token
+ # to avoid unbounded generation in the transformers `pipeline()` function
+ trainer.model.generation_config.eos_token_id = tokenizer.eos_token_id
trainer.save_model(training_args.output_dir)
+ logger.info(f"Model saved to {training_args.output_dir}")
+
+ # Save everything else on main process
+ kwargs = {
+ "dataset_name": script_args.dataset_name,
+ "tags": ["open-r1"],
+ }
+ if trainer.accelerator.is_main_process:
+ trainer.create_model_card(**kwargs)
+ # Restore k,v cache for fast inference
+ trainer.model.config.use_cache = True
+ trainer.model.config.save_pretrained(training_args.output_dir)
+
+ ##########
+ # Evaluate
+ ##########
+ if training_args.do_eval:
+ logger.info("*** Evaluate ***")
+ metrics = trainer.evaluate()
+ metrics["eval_samples"] = len(dataset[script_args.dataset_test_split])
+ trainer.log_metrics("eval", metrics)
+ trainer.save_metrics("eval", metrics)
+
+ #############
+ # push to hub
+ #############
if training_args.push_to_hub:
- trainer.push_to_hub(dataset_name=script_args.dataset_name)
+ logger.info("Pushing to hub...")
+ trainer.push_to_hub(**kwargs)
if __name__ == "__main__":
diff --git a/src/open_r1/rewards.py b/src/open_r1/rewards.py
new file mode 100644
index 000000000..0b3662841
--- /dev/null
+++ b/src/open_r1/rewards.py
@@ -0,0 +1,706 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Reward functions for GRPO training."""
+
+import asyncio
+import json
+import math
+import re
+from functools import partial, update_wrapper
+from typing import Callable, Dict, Literal, Optional
+
+from latex2sympy2_extended import NormalizationConfig
+from math_verify import LatexExtractionConfig, parse, verify
+
+from .utils.code_providers import get_provider
+from .utils.competitive_programming import (
+ SubtaskResult,
+ add_includes,
+ get_morph_client_from_env,
+ get_piston_client_from_env,
+)
+from .utils.competitive_programming import patch_code as cf_patch_code
+from .utils.competitive_programming import score_submission as cf_score_submission
+from .utils.competitive_programming import score_subtask
+
+
+def accuracy_reward(completions: list[list[dict[str, str]]], solution: list[str], **kwargs) -> list[Optional[float]]:
+ """Reward function that checks if the completion is the same as the ground truth."""
+ contents = [completion[0]["content"] for completion in completions]
+ rewards = []
+ for content, sol in zip(contents, solution):
+ gold_parsed = parse(
+ sol,
+ extraction_mode="first_match",
+ )
+ if len(gold_parsed) != 0:
+ # We require the answer to be provided in correct latex (no malformed operators)
+ answer_parsed = parse(
+ content,
+ extraction_config=[
+ LatexExtractionConfig(
+ normalization_config=NormalizationConfig(
+ nits=False,
+ malformed_operators=False,
+ basic_latex=True,
+ equations=True,
+ boxed="all",
+ units=True,
+ ),
+ # Ensures that boxed is tried first
+ boxed_match_priority=0,
+ try_extract_without_anchor=False,
+ )
+ ],
+ extraction_mode="first_match",
+ )
+ # Compute binary rewards if verifiable, `None` otherwise to skip this example
+ try:
+ reward = float(verify(gold_parsed, answer_parsed))
+ except Exception as e:
+ print(f"verify failed: {e}, answer: {answer_parsed}, gold: {gold_parsed}")
+ reward = None
+ else:
+ # If the gold solution is not parseable, we assign `None` to skip this example
+ reward = None
+ print("Failed to parse gold solution: ", sol)
+ rewards.append(reward)
+
+ return rewards
+
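+# Minimal usage sketch (illustrative; the strings are hypothetical and assume `math_verify`
+# parses them as written; not part of the training loop):
+# >>> accuracy_reward([[{"role": "assistant", "content": r"The answer is \boxed{\frac{1}{2}}"}]], [r"$\frac{1}{2}$"])
+# [1.0]
+# >>> accuracy_reward([[{"role": "assistant", "content": r"The answer is \boxed{\frac{1}{3}}"}]], [r"$\frac{1}{2}$"])
+# [0.0]
+# An unparseable gold solution yields [None] so the example is skipped.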
+
+def format_reward(completions, **kwargs):
+ """Reward function that checks if the reasoning process is enclosed within and tags, while the final answer is enclosed within and tags."""
+ pattern = r"^\n.*?\n\n\n.*?\n$"
+ completion_contents = [completion[0]["content"] for completion in completions]
+ matches = [re.match(pattern, content, re.DOTALL | re.MULTILINE) for content in completion_contents]
+ return [1.0 if match else 0.0 for match in matches]
+
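+# Illustrative check (hypothetical strings): a completion that follows the expected
+# <think>...</think> / <answer>...</answer> layout scores 1.0, anything else 0.0.
+# >>> format_reward([[{"content": "<think>\nstep by step\n</think>\n<answer>\n42\n</answer>"}]])
+# [1.0]
+# >>> format_reward([[{"content": "42"}]])
+# [0.0]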
+
+def tag_count_reward(completions, **kwargs) -> list[float]:
+ """Reward function that checks if we produce the desired number of think and answer tags associated with `format_reward()`.
+
+ Adapted from: https://gist.github.com/willccbb/4676755236bb08cab5f4e54a0475d6fb#file-grpo_demo-py-L90
+ """
+
+ def count_tags(text: str) -> float:
+ count = 0.0
+ if text.count("\n") == 1:
+ count += 0.25
+ if text.count("\n\n") == 1:
+ count += 0.25
+ if text.count("\n\n") == 1:
+ count += 0.25
+ if text.count("\n") == 1:
+ count += 0.25
+ return count
+
+ contents = [completion[0]["content"] for completion in completions]
+ return [count_tags(c) for c in contents]
+
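+# Illustrative partial credit (hypothetical string): a completion containing only a
+# well-formed <think> block earns 0.5 of the possible 1.0.
+# >>> tag_count_reward([[{"content": "<think>\nsome reasoning\n</think>\n"}]])
+# [0.5]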
+
+def reasoning_steps_reward(completions, **kwargs):
+ r"""Reward function that checks for clear step-by-step reasoning.
+ Regex pattern:
+ Step \d+: - matches "Step 1:", "Step 2:", etc.
+ ^\d+\. - matches numbered lists like "1.", "2.", etc. at start of line
+ \n- - matches bullet points with hyphens
+ \n\* - matches bullet points with asterisks
+ First,|Second,|Next,|Finally, - matches transition words
+ """
+ pattern = r"(Step \d+:|^\d+\.|\n-|\n\*|First,|Second,|Next,|Finally,)"
+ completion_contents = [completion[0]["content"] for completion in completions]
+ matches = [len(re.findall(pattern, content)) for content in completion_contents]
+
+ # Magic number 3 to encourage 3 steps and more, otherwise partial reward
+ return [min(1.0, count / 3) for count in matches]
+
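+# Illustrative scaling (hypothetical string): two transition words give 2/3 of the full reward,
+# since at least three indicators are needed to reach the maximum of 1.0.
+# >>> reasoning_steps_reward([[{"content": "First, expand the product. Next, collect the terms."}]])
+# [0.6666666666666666]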
+
+def len_reward(completions: list[Dict[str, str]], solution: list[str], **kwargs) -> list[float]:
+ """Compute length-based rewards to discourage overthinking and promote token efficiency.
+
+ Taken from the Kimi 1.5 tech report: https://huggingface.co/papers/2501.12599
+
+ Args:
+ completions: List of model completions
+ solution: List of ground truth solutions
+
+ Returns:
+ List of rewards where:
+ - For correct answers: reward = 0.5 - (len - min_len)/(max_len - min_len)
+ - For incorrect answers: reward = min(0, 0.5 - (len - min_len)/(max_len - min_len))
+ """
+ contents = [completion[0]["content"] for completion in completions]
+
+ # First check correctness of answers
+ correctness = []
+ for content, sol in zip(contents, solution):
+ gold_parsed = parse(
+ sol,
+ extraction_mode="first_match",
+ extraction_config=[LatexExtractionConfig()],
+ )
+ if len(gold_parsed) == 0:
+ # Skip unparseable examples
+ correctness.append(True) # Treat as correct to avoid penalizing
+ print("Failed to parse gold solution: ", sol)
+ continue
+
+ answer_parsed = parse(
+ content,
+ extraction_config=[
+ LatexExtractionConfig(
+ normalization_config=NormalizationConfig(
+ nits=False,
+ malformed_operators=False,
+ basic_latex=True,
+ equations=True,
+ boxed=True,
+ units=True,
+ ),
+ boxed_match_priority=0,
+ try_extract_without_anchor=False,
+ )
+ ],
+ extraction_mode="first_match",
+ )
+ correctness.append(verify(answer_parsed, gold_parsed))
+
+ # Calculate lengths
+ lengths = [len(content) for content in contents]
+ min_len = min(lengths)
+ max_len = max(lengths)
+
+ # If all responses have the same length, return zero rewards
+ if max_len == min_len:
+ return [0.0] * len(completions)
+
+ rewards = []
+ for length, is_correct in zip(lengths, correctness):
+ lambda_val = 0.5 - (length - min_len) / (max_len - min_len)
+
+ if is_correct:
+ reward = lambda_val
+ else:
+ reward = min(0, lambda_val)
+
+ rewards.append(float(reward))
+
+ return rewards
+
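+# Worked example (hypothetical lengths within one group): with completion lengths 100 and 300,
+# min_len=100 and max_len=300, so lambda = 0.5 - (len - 100) / 200.
+#   len=100, correct   -> reward = 0.5
+#   len=300, correct   -> reward = -0.5 (long correct answers are penalized)
+#   len=100, incorrect -> reward = min(0, 0.5) = 0.0
+#   len=300, incorrect -> reward = min(0, -0.5) = -0.5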
+
+def get_cosine_scaled_reward(
+ min_value_wrong: float = -1.0,
+ max_value_wrong: float = -0.5,
+ min_value_correct: float = 0.5,
+ max_value_correct: float = 1.0,
+ max_len: int = 1000,
+):
+ def cosine_scaled_reward(completions, solution, **kwargs):
+ """Reward function that scales based on completion length using a cosine schedule.
+
+ Shorter correct solutions are rewarded more than longer ones.
+ Longer incorrect solutions are penalized less than shorter ones.
+
+ Args:
+ completions: List of model completions
+ solution: List of ground truth solutions
+
+ This function is parameterized by the following arguments:
+ min_value_wrong: Minimum reward for wrong answers
+ max_value_wrong: Maximum reward for wrong answers
+ min_value_correct: Minimum reward for correct answers
+ max_value_correct: Maximum reward for correct answers
+ max_len: Maximum length for scaling
+ """
+ contents = [completion[0]["content"] for completion in completions]
+ rewards = []
+
+ for content, sol in zip(contents, solution):
+ gold_parsed = parse(
+ sol,
+ extraction_mode="first_match",
+ extraction_config=[LatexExtractionConfig()],
+ )
+ if len(gold_parsed) == 0:
+ rewards.append(1.0) # Skip unparseable examples
+ print("Failed to parse gold solution: ", sol)
+ continue
+
+ answer_parsed = parse(
+ content,
+ extraction_config=[
+ LatexExtractionConfig(
+ normalization_config=NormalizationConfig(
+ nits=False,
+ malformed_operators=False,
+ basic_latex=True,
+ equations=True,
+ boxed=True,
+ units=True,
+ ),
+ boxed_match_priority=0,
+ try_extract_without_anchor=False,
+ )
+ ],
+ extraction_mode="first_match",
+ )
+
+ is_correct = verify(answer_parsed, gold_parsed)
+ gen_len = len(content)
+
+ # Apply cosine scaling based on length
+ progress = gen_len / max_len
+ cosine = math.cos(progress * math.pi)
+
+ if is_correct:
+ min_value = min_value_correct
+ max_value = max_value_correct
+ else:
+ # Swap min/max for incorrect answers
+ min_value = max_value_wrong
+ max_value = min_value_wrong
+
+ reward = min_value + 0.5 * (max_value - min_value) * (1.0 + cosine)
+ rewards.append(float(reward))
+
+ return rewards
+
+ return cosine_scaled_reward
+
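+# Worked example with the default parameters above (hypothetical lengths):
+#   correct answer, length 0        -> 0.5 + 0.5 * 0.5 * (1 + cos(0))  = 1.0
+#   correct answer, length max_len  -> 0.5 + 0.5 * 0.5 * (1 + cos(pi)) = 0.5
+#   wrong answer,   length 0        -> -0.5 + 0.5 * (-0.5) * (1 + cos(0))  = -1.0
+#   wrong answer,   length max_len  -> -0.5 + 0.5 * (-0.5) * (1 + cos(pi)) = -0.5
+# i.e. short correct answers earn the most, short wrong answers are penalized the most.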
+
+def get_repetition_penalty_reward(ngram_size: int, max_penalty: float, language: str = "en"):
+ """
+ Computes N-gram repetition penalty as described in Appendix C.2 of https://huggingface.co/papers/2502.03373.
+ Reference implementation from: https://github.com/eddycmu/demystify-long-cot/blob/release/openrlhf/openrlhf/reward/repetition.py
+
+ Args:
+ ngram_size: size of the n-grams
+ max_penalty: Maximum (negative) penalty for wrong answers
+ language: Language of the text, defaults to `en`. Used to choose the way to split the text into n-grams.
+ """
+ if max_penalty > 0:
+ raise ValueError(f"max_penalty {max_penalty} should not be positive")
+
+ if language == "en":
+
+ def zipngram(text: str, ngram_size: int):
+ words = text.lower().split()
+ return zip(*[words[i:] for i in range(ngram_size)]), words
+
+ elif language == "zh":
+ from transformers.utils.import_utils import _is_package_available
+
+ if not _is_package_available("jieba"):
+ raise ValueError("Please install jieba to use Chinese language")
+
+ def zipngram(text: str, ngram_size: int):
+ import jieba
+
+ seg_list = list(jieba.cut(text))
+ return zip(*[seg_list[i:] for i in range(ngram_size)]), seg_list
+
+ else:
+ raise ValueError(
+ f"Word splitting for language `{language}` is not yet implemented. Please implement your own zip-ngram function."
+ )
+
+    def repetition_penalty_reward(completions, **kwargs) -> list[float]:
+ """
+        Reward function that penalizes repetitions.
+        Reference implementation: https://github.com/eddycmu/demystify-long-cot/blob/release/openrlhf/openrlhf/reward/repetition.py
+
+ Args:
+ completions: List of model completions
+ """
+
+ contents = [completion[0]["content"] for completion in completions]
+ rewards = []
+ for completion in contents:
+ if completion == "":
+ rewards.append(0.0)
+ continue
+
+ ngrams = set()
+ total = 0
+ ngram_array, words = zipngram(completion, ngram_size)
+
+ if len(words) < ngram_size:
+ rewards.append(0.0)
+ continue
+
+ for ng in ngram_array:
+ ngrams.add(ng)
+ total += 1
+
+ scaling = 1 - len(ngrams) / total
+ reward = scaling * max_penalty
+ rewards.append(reward)
+ return rewards
+
+ return repetition_penalty_reward
+
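+# Worked example (hypothetical settings): with ngram_size=2 and max_penalty=-1.0, the completion
+# "the cat the cat" yields bigrams (the, cat), (cat, the), (the, cat): 2 unique out of 3 total,
+# so scaling = 1 - 2/3 and the reward is -1.0 * 1/3 ≈ -0.33.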
+
+def _init_event_loop():
+ """Initialize or get the current event loop."""
+ try:
+ loop = asyncio.get_event_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ return loop
+
+
+def ioi_code_reward(completions, test_batch_size: int = 1, provider_type: str = "piston", **kwargs) -> list[float]:
+ """Reward function that evaluates IOI problems using a specified execution client.
+
+ Assumes the dataset has the same format as hf.co/datasets/open-r1/ioi
+
+ Args:
+ completions: List of model completions to evaluate
+        test_batch_size: Evaluate this many test cases in parallel, then check if any of them failed (0 score):
+ if so stop evaluating; otherwise continue with the next batch of test cases.
+ provider_type: The execution provider to use (default: "piston"). Supported values: "piston", "morph"
+ **kwargs: Additional arguments passed from the dataset
+ """
+ # Get the appropriate client based on provider_type
+ if provider_type == "morph":
+ execution_client = get_morph_client_from_env()
+ else:
+ # for info on setting up piston workers, see slurm/piston/README.md
+ execution_client = get_piston_client_from_env()
+
+ code_snippets = [
+ # note: grading is automatically skipped if no code is extracted
+ add_includes(extract_code(completion[-1]["content"], "cpp"), problem_id)
+ for completion, problem_id in zip(completions, kwargs["id"])
+ ]
+
+ async def run_catch_exceptions(task):
+ try:
+ return await task
+ except Exception as e:
+ print(f"Error from {provider_type} worker: {e}")
+ return SubtaskResult()
+
+ problems_data = [dict(zip(kwargs.keys(), values)) for values in zip(*kwargs.values())]
+
+ loop = _init_event_loop()
+ evals = [
+ loop.create_task(
+ run_catch_exceptions(
+ score_subtask(
+ execution_client,
+ problem_data,
+ code,
+ test_batch_size=test_batch_size,
+ )
+ )
+ )
+ for problem_data, code in zip(problems_data, code_snippets)
+ ]
+ results = loop.run_until_complete(asyncio.gather(*evals))
+
+ return [result.score for result in results]
+
+
+def cf_code_reward(
+ completions,
+ test_batch_size: int = 1,
+ patch_code: bool = False,
+ scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = "weighted_sum",
+ **kwargs,
+) -> list[float]:
+ """Reward function that evaluates Codeforces problems using Piston+our CF package.
+
+ Assumes the dataset has the same format as hf.co/datasets/open-r1/codeforces (verifiable-prompts subset)
+
+    test_batch_size: evaluate this many test cases in parallel, then check if any of them failed (0 score): if so, stop evaluating; otherwise continue with the next batch of test cases.
+ """
+ # for info on setting up piston workers, see slurm/piston/README.md
+ piston_client = get_piston_client_from_env()
+
+ languages = kwargs["language"] if "language" in kwargs else [None] * len(completions)
+ code_snippets = [
+ # note: grading is automatically skipped if a problem has no tests
+ cf_patch_code(extract_code(completion[-1]["content"], language), language)
+ if patch_code
+ else extract_code(completion[-1]["content"], language)
+ for completion, language in zip(completions, languages)
+ ]
+
+ async def run_catch_exceptions(task):
+ try:
+ return await task
+ except Exception as e:
+ print(f"Error from Piston worker: {e}")
+ return None
+
+ # load problem data. undo separating kwargs by column
+ problems_data = [dict(zip(kwargs.keys(), values)) for values in zip(*kwargs.values())]
+
+ loop = _init_event_loop()
+ evals = [
+ loop.create_task(
+ run_catch_exceptions(
+ cf_score_submission(
+ piston_client,
+ problem_data,
+ code,
+ test_batch_size=test_batch_size,
+ scoring_mode=scoring_mode,
+ submission_language=problem_data.get("language", None),
+ )
+ )
+ )
+ for problem_data, code in zip(problems_data, code_snippets)
+ ]
+ results = loop.run_until_complete(asyncio.gather(*evals))
+
+ return results
+
+
+def extract_code(completion: str, language: str | None = "python") -> str:
+ if language is None:
+ return ""
+ pattern = re.compile(rf"```{language}\n(.*?)```", re.DOTALL)
+ matches = pattern.findall(completion)
+ extracted_answer = matches[-1] if len(matches) >= 1 else ""
+ return extracted_answer
+
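+# Illustrative extraction (hypothetical completion): the last fenced block for the requested
+# language is returned, or an empty string if none is found.
+# >>> extract_code("Here you go:\n```python\nprint(1 + 1)\n```", "python")
+# 'print(1 + 1)\n'
+# >>> extract_code("Here you go:\n```python\nprint(1 + 1)\n```", "cpp")
+# ''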
+
+def binary_code_reward(
+ completions,
+ num_parallel: int = 2,
+ provider_type: str = "e2b",
+ enforce_same_language: bool = False,
+ **kwargs,
+) -> list[float]:
+ rewards = code_reward(
+ completions,
+ num_parallel=num_parallel,
+ provider_type=provider_type,
+ enforce_same_language=enforce_same_language,
+ **kwargs,
+ )
+ BINARY_THRESHOLD = 0.99
+
+ output = []
+ for reward in rewards:
+ if reward is None:
+ output.append(None)
+ else:
+ output.append(1.0 if reward > BINARY_THRESHOLD else 0.0)
+
+ return output
+
+
+def code_reward(
+ completions,
+ num_parallel: int = 2,
+ provider_type: str = "e2b",
+ enforce_same_language: bool = False,
+ **kwargs,
+) -> list[float]:
+ """Reward function that evaluates code snippets using a code execution provider.
+
+ Assumes the dataset contains a `verification_info` column with test cases.
+
+ Args:
+ completions: List of model completions to evaluate
+ num_parallel: Number of parallel code executions (default: 2)
+ provider_type: Which code execution provider to use (default: "e2b")
+ enforce_same_language: If True, verify all problems use the same language (default: False)
+ **kwargs: Additional arguments passed to the verification
+ """
+ evaluation_script_template = """
+ import subprocess
+ import json
+
+ def evaluate_code(code, test_cases):
+ passed = 0
+ total = len(test_cases)
+ exec_timeout = 5
+
+ for case in test_cases:
+ process = subprocess.run(
+ ["python3", "-c", code],
+ input=case["input"],
+ text=True,
+ capture_output=True,
+ timeout=exec_timeout
+ )
+
+ if process.returncode != 0: # Error in execution
+ continue
+
+ output = process.stdout.strip()
+
+ # TODO: implement a proper validator to compare against ground truth. For now we just check for exact string match on each line of stdout.
+ all_correct = True
+ for line1, line2 in zip(output.split('\\n'), case['output'].split('\\n')):
+ all_correct = all_correct and line1.strip() == line2.strip()
+
+ if all_correct:
+ passed += 1
+
+ success_rate = (passed / total)
+ return success_rate
+
+ code_snippet = {code}
+ test_cases = json.loads({test_cases})
+
+ evaluate_code(code_snippet, test_cases)
+ """
+
+ code_snippets = [extract_code(completion[-1]["content"]) for completion in completions]
+ verification_info = kwargs["verification_info"]
+
+ template = evaluation_script_template
+
+ scripts = [
+ template.format(code=json.dumps(code), test_cases=json.dumps(json.dumps(info["test_cases"])))
+ for code, info in zip(code_snippets, verification_info)
+ ]
+
+ language = verification_info[0]["language"]
+
+ if enforce_same_language:
+ all_same_language = all(v["language"] == language for v in verification_info)
+ if not all_same_language:
+ raise ValueError("All verification_info must have the same language", verification_info)
+
+ execution_provider = get_provider(
+ provider_type=provider_type,
+ num_parallel=num_parallel,
+ **kwargs,
+ )
+
+ return execution_provider.execute_scripts(scripts, ["python"] * len(scripts))
+
+
+def get_code_format_reward(language: str = "python"):
+ """Format reward function specifically for code responses.
+
+ Args:
+ language: Programming language supported by E2B https://e2b.dev/docs/code-interpreting/supported-languages
+ """
+
+ def code_format_reward(completions, **kwargs):
+ # if there is a language field, use it instead of the default language. This way we can have mixed language training.
+ languages = kwargs["language"] if "language" in kwargs else [language] * len(completions)
+
+ completion_contents = [completion[0]["content"] for completion in completions]
+ matches = [
+ re.match(
+ rf"^\n.*?\n\n\n.*?```{sample_language}.*?```.*?\n$",
+ content,
+ re.DOTALL | re.MULTILINE,
+ )
+ for content, sample_language in zip(completion_contents, languages)
+ ]
+ return [1.0 if match else 0.0 for match in matches]
+
+ return code_format_reward
+
+
+def get_soft_overlong_punishment(max_completion_len, soft_punish_cache):
+ """
+    Reward function that penalizes overlong completions without rewarding shorter ones.
+    Reference: Eq. (13) from the DAPO paper (https://huggingface.co/papers/2503.14476)
+
+ Args:
+ max_completion_len: Maximum length of the completion
+ soft_punish_cache: Minimum length of the completion. If set to 0, no minimum length is applied.
+ """
+
+ def soft_overlong_punishment_reward(completion_ids: list[list[int]], **kwargs) -> list[float]:
+ """Reward function that penalizes overlong completions."""
+ rewards = []
+ for ids in completion_ids:
+ completion_length = len(ids)
+ if completion_length <= max_completion_len - soft_punish_cache:
+ rewards.append(0.0)
+ elif max_completion_len - soft_punish_cache < completion_length <= max_completion_len:
+ rewards.append((max_completion_len - soft_punish_cache - completion_length) / soft_punish_cache)
+ else:
+ rewards.append(-1.0)
+ return rewards
+
+ return soft_overlong_punishment_reward
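+# Worked example (hypothetical settings): with max_completion_len=16384 and soft_punish_cache=4096,
+# completions up to 12288 tokens get 0.0, a 14336-token completion gets
+# (16384 - 4096 - 14336) / 4096 = -0.5, and anything longer than 16384 tokens gets -1.0.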
+
+
+def get_reward_funcs(script_args) -> list[Callable]:
+ REWARD_FUNCS_REGISTRY = {
+ "accuracy": accuracy_reward,
+ "format": format_reward,
+ "reasoning_steps": reasoning_steps_reward,
+ "cosine": get_cosine_scaled_reward(
+ min_value_wrong=script_args.cosine_min_value_wrong,
+ max_value_wrong=script_args.cosine_max_value_wrong,
+ min_value_correct=script_args.cosine_min_value_correct,
+ max_value_correct=script_args.cosine_max_value_correct,
+ max_len=script_args.cosine_max_len,
+ ),
+ "repetition_penalty": get_repetition_penalty_reward(
+ ngram_size=script_args.repetition_n_grams,
+ max_penalty=script_args.repetition_max_penalty,
+ ),
+ "length": len_reward,
+ "code": update_wrapper(
+ partial(
+ code_reward,
+ num_parallel=script_args.parallel_code_exec_per_proc,
+ provider_type=script_args.code_provider,
+ enforce_same_language=getattr(script_args, "enforce_same_language", False),
+ ),
+ code_reward,
+ ),
+ "binary_code": update_wrapper(
+ partial(
+ binary_code_reward,
+ num_parallel=script_args.parallel_code_exec_per_proc,
+ provider_type=script_args.code_provider,
+ enforce_same_language=getattr(script_args, "enforce_same_language", False),
+ ),
+ binary_code_reward,
+ ),
+ "ioi_code": update_wrapper(
+ partial(
+ ioi_code_reward,
+ test_batch_size=script_args.code_eval_test_batch_size,
+ provider_type=getattr(script_args, "ioi_provider", "piston"),
+ ),
+ ioi_code_reward,
+ ),
+ "cf_code": update_wrapper(
+ partial(
+ cf_code_reward,
+ test_batch_size=script_args.code_eval_test_batch_size,
+ scoring_mode=script_args.code_eval_scoring_mode,
+ ),
+ cf_code_reward,
+ ),
+ "code_format": get_code_format_reward(language=script_args.code_language),
+ "tag_count": tag_count_reward,
+ "soft_overlong_punishment": get_soft_overlong_punishment(
+ max_completion_len=script_args.max_completion_len,
+ soft_punish_cache=script_args.soft_punish_cache,
+ ),
+ }
+ reward_funcs = [REWARD_FUNCS_REGISTRY[func] for func in script_args.reward_funcs]
+
+ return reward_funcs
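+
+
+# Minimal usage sketch (illustrative): with `script_args.reward_funcs = ["accuracy", "format"]`,
+# `get_reward_funcs(script_args)` returns `[accuracy_reward, format_reward]`, which grpo.py
+# passes to `GRPOTrainer(reward_funcs=...)`.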
diff --git a/src/open_r1/sft.py b/src/open_r1/sft.py
index 56bcddb10..c11c023ca 100644
--- a/src/open_r1/sft.py
+++ b/src/open_r1/sft.py
@@ -18,83 +18,149 @@
Usage:
# On 1 node of 8 x H100s
-accelerate launch --config_file=configs/zero3.yaml src/open_r1/sft.py \
- --model_name_or_path Qwen/Qwen2.5-1.5B-Instruct \
- --dataset_name HuggingFaceH4/Bespoke-Stratos-17k \
- --learning_rate 2.0e-5 \
- --num_train_epochs 1 \
- --packing \
- --max_seq_length 4096 \
- --per_device_train_batch_size 4 \
- --gradient_accumulation_steps 4 \
+accelerate launch --config_file=recipes/accelerate_configs/zero3.yaml src/open_r1/sft.py \
+ --model_name_or_path open-r1/Qwen2.5-Math-7B-RoPE-300k \
+ --dataset_name open-r1/Mixture-of-Thoughts \
+ --dataset_config all \
+ --eos_token '<|im_end|>' \
+ --learning_rate 4.0e-5 \
+ --num_train_epochs 5 \
+ --max_seq_length 32768 \
+ --per_device_train_batch_size 2 \
--gradient_checkpointing \
--bf16 \
- --logging_steps 5 \
- --eval_strategy steps \
- --eval_steps 100 \
- --output_dir data/Qwen2.5-1.5B-Open-R1-Distill
+ --use_liger_kernel \
+ --output_dir data/OpenR1-Distill-7B
"""
-from datasets import load_dataset
-from transformers import AutoTokenizer
+import logging
+import os
+import sys
-from open_r1.configs import SFTConfig
+import datasets
+import transformers
+from transformers import set_seed
+from transformers.trainer_utils import get_last_checkpoint
+
+from open_r1.configs import ScriptArguments, SFTConfig
+from open_r1.utils import get_dataset, get_model, get_tokenizer
from open_r1.utils.callbacks import get_callbacks
-from trl import (
- ModelConfig,
- ScriptArguments,
- SFTTrainer,
- TrlParser,
- get_kbit_device_map,
- get_peft_config,
- get_quantization_config,
-)
+from open_r1.utils.wandb_logging import init_wandb_training
+from trl import ModelConfig, SFTTrainer, TrlParser, get_peft_config, setup_chat_format
+
+
+logger = logging.getLogger(__name__)
def main(script_args, training_args, model_args):
- ################
- # Model init kwargs & Tokenizer
- ################
- quantization_config = get_quantization_config(model_args)
- model_kwargs = dict(
- revision=model_args.model_revision,
- trust_remote_code=model_args.trust_remote_code,
- attn_implementation=model_args.attn_implementation,
- torch_dtype=model_args.torch_dtype,
- use_cache=False if training_args.gradient_checkpointing else True,
- device_map=get_kbit_device_map() if quantization_config is not None else None,
- quantization_config=quantization_config,
- )
- training_args.model_init_kwargs = model_kwargs
- tokenizer = AutoTokenizer.from_pretrained(
- model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, use_fast=True
+ set_seed(training_args.seed)
+
+ ###############
+ # Setup logging
+ ###############
+ logging.basicConfig(
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ handlers=[logging.StreamHandler(sys.stdout)],
)
- tokenizer.pad_token = tokenizer.eos_token
+ log_level = training_args.get_process_log_level()
+ logger.setLevel(log_level)
+ datasets.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.enable_default_handler()
+ transformers.utils.logging.enable_explicit_format()
+
+ logger.info(f"Model parameters {model_args}")
+ logger.info(f"Script parameters {script_args}")
+ logger.info(f"Training parameters {training_args}")
- ################
- # Dataset
- ################
- dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
+ # Check for last checkpoint
+ last_checkpoint = None
+ if os.path.isdir(training_args.output_dir):
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
+ if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+ logger.info(f"Checkpoint detected, resuming training at {last_checkpoint=}.")
- ################
- # Training
- ################
+ if "wandb" in training_args.report_to:
+ init_wandb_training(training_args)
+
+ ######################################
+ # Load dataset, tokenizer, and model #
+ ######################################
+ dataset = get_dataset(script_args)
+ tokenizer = get_tokenizer(model_args, training_args)
+ model = get_model(model_args, training_args)
+
+ if tokenizer.chat_template is None:
+ logger.info("No chat template provided, defaulting to ChatML.")
+ model, tokenizer = setup_chat_format(model, tokenizer, format="chatml")
+
+ ############################
+ # Initialize the SFT Trainer
+ ############################
trainer = SFTTrainer(
- model=model_args.model_name_or_path,
+ model=model,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
- eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
+ eval_dataset=(dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None),
processing_class=tokenizer,
peft_config=get_peft_config(model_args),
callbacks=get_callbacks(training_args, model_args),
)
- trainer.train()
+ ###############
+ # Training loop
+ ###############
+ logger.info("*** Train ***")
+ checkpoint = None
+ if training_args.resume_from_checkpoint is not None:
+ checkpoint = training_args.resume_from_checkpoint
+ elif last_checkpoint is not None:
+ checkpoint = last_checkpoint
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
+ metrics = train_result.metrics
+ metrics["train_samples"] = len(dataset[script_args.dataset_train_split])
+ trainer.log_metrics("train", metrics)
+ trainer.save_metrics("train", metrics)
+ trainer.save_state()
- # Save and push to hub
+ ##################################
+ # Save model and create model card
+ ##################################
+ logger.info("*** Save model ***")
+ # Align the model's generation config with the tokenizer's eos token
+ # to avoid unbounded generation in the transformers `pipeline()` function
+ trainer.model.generation_config.eos_token_id = tokenizer.eos_token_id
trainer.save_model(training_args.output_dir)
+ logger.info(f"Model saved to {training_args.output_dir}")
+
+ # Save everything else on main process
+ kwargs = {
+ "dataset_name": script_args.dataset_name,
+ "tags": ["open-r1"],
+ }
+ if trainer.accelerator.is_main_process:
+ trainer.create_model_card(**kwargs)
+ # Restore k,v cache for fast inference
+ trainer.model.config.use_cache = True
+ trainer.model.config.save_pretrained(training_args.output_dir)
+
+ ##########
+ # Evaluate
+ ##########
+ if training_args.do_eval:
+ logger.info("*** Evaluate ***")
+ metrics = trainer.evaluate()
+ metrics["eval_samples"] = len(dataset[script_args.dataset_test_split])
+ trainer.log_metrics("eval", metrics)
+ trainer.save_metrics("eval", metrics)
+
+ #############
+ # push to hub
+ #############
if training_args.push_to_hub:
- trainer.push_to_hub(dataset_name=script_args.dataset_name)
+ logger.info("Pushing to hub...")
+ trainer.push_to_hub(**kwargs)
if __name__ == "__main__":
diff --git a/src/open_r1/utils/__init__.py b/src/open_r1/utils/__init__.py
index e69de29bb..d3b84a99d 100644
--- a/src/open_r1/utils/__init__.py
+++ b/src/open_r1/utils/__init__.py
@@ -0,0 +1,6 @@
+from .data import get_dataset
+from .import_utils import is_e2b_available, is_morph_available
+from .model_utils import get_model, get_tokenizer
+
+
+__all__ = ["get_tokenizer", "is_e2b_available", "is_morph_available", "get_model", "get_dataset"]
diff --git a/src/open_r1/utils/callbacks.py b/src/open_r1/utils/callbacks.py
index c1b0ac5dc..88e656243 100644
--- a/src/open_r1/utils/callbacks.py
+++ b/src/open_r1/utils/callbacks.py
@@ -44,7 +44,13 @@ class PushToHubRevisionCallback(TrainerCallback):
def __init__(self, model_config) -> None:
self.model_config = model_config
- def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
+ def on_save(
+ self,
+ args: TrainingArguments,
+ state: TrainerState,
+ control: TrainerControl,
+ **kwargs,
+ ):
if state.is_world_process_zero:
global_step = state.global_step
diff --git a/src/open_r1/utils/code_providers.py b/src/open_r1/utils/code_providers.py
new file mode 100644
index 000000000..71830e6ae
--- /dev/null
+++ b/src/open_r1/utils/code_providers.py
@@ -0,0 +1,366 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Code execution providers for executing and evaluating code snippets."""
+
+import abc
+import asyncio
+from typing import List, Optional
+
+from ..utils import is_e2b_available, is_morph_available
+
+
+if is_e2b_available():
+ from e2b_code_interpreter import AsyncSandbox
+ from e2b_code_interpreter.models import Execution
+
+ from .routed_sandbox import RoutedSandbox
+else:
+ AsyncSandbox = None
+ Execution = None
+ RoutedSandbox = None
+
+if is_morph_available():
+ from morphcloud.api import MorphCloudClient
+ from morphcloud.sandbox import Sandbox
+
+ from .routed_morph import RoutedMorphSandbox
+else:
+ MorphCloudClient = None
+ Sandbox = None
+ RoutedMorphSandbox = None
+
+
+class CodeExecutionProvider(abc.ABC):
+ """Abstract base class for code execution providers."""
+
+ @abc.abstractmethod
+ def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
+ """Execute multiple scripts and return their reward values.
+
+ Args:
+ scripts: List of code scripts to execute
+            languages: List of programming languages, one per script
+
+ Returns:
+ List of float rewards (one per script)
+ """
+ pass
+
+
+class E2BProvider(CodeExecutionProvider):
+ """Provider that executes code using E2B sandboxes."""
+
+ def __init__(self, num_parallel: int = 2, e2b_router_url: Optional[str] = None):
+ """Initialize the E2B provider.
+
+ Args:
+ num_parallel: Number of parallel sandboxes to use
+ e2b_router_url: URL for the E2B router (if using router mode)
+ """
+ if not is_e2b_available():
+ raise ImportError(
+ "E2B is not available and required for this provider. Please install E2B with "
+ "`pip install e2b-code-interpreter` and add an API key to a `.env` file."
+ )
+
+ self.num_parallel = num_parallel
+ self.e2b_router_url = e2b_router_url
+
+ def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
+ """Execute scripts using E2B sandboxes.
+
+ If e2b_router_url is provided, uses the RoutedSandbox for batch processing.
+ Otherwise, uses direct AsyncSandbox with parallelization.
+ """
+ if self.e2b_router_url is not None:
+ routed_sandbox = RoutedSandbox(router_url=self.e2b_router_url)
+
+ executions = routed_sandbox.run_code(
+ scripts=scripts,
+ languages=languages,
+ timeout=30,
+ request_timeout=28,
+ )
+
+ rewards = []
+ for execution in executions:
+ try:
+ reward = float(execution.text)
+ rewards.append(reward)
+ except Exception:
+ rewards.append(None)
+ return rewards
+
+ try:
+ rewards = self._run_async_from_sync(scripts, languages, self.num_parallel)
+ except Exception as e:
+ print(f"Error from E2B executor: {e}")
+ rewards = [0.0] * len(scripts)
+
+ return rewards
+
+ def _run_async_from_sync(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
+ """Function wrapping the `_run_async` function."""
+ try:
+ rewards = asyncio.run(self._run_async(scripts, languages, num_parallel))
+ except Exception as e:
+ print(f"Error from E2B executor async: {e}")
+ raise e
+
+ return rewards
+
+ async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
+ semaphore = asyncio.Semaphore(num_parallel)
+
+ tasks = [self._run_script(script, languages, semaphore) for script in scripts]
+
+ results = await asyncio.gather(*tasks)
+ rewards = list(results)
+
+ return rewards
+
+ async def _run_script(self, script: str, languages: List[str], semaphore: asyncio.Semaphore) -> float:
+ # We set a timeout margin, as the AsyncSandbox timeout does not seem to work
+ # These values are based on running 256 examples with the gold solution
+ # from open-r1/verifiable-coding-problems-python_decontaminated
+ # see scripts/benchmark_e2b.py
+
+ SANDBOX_TIMEOUT = 30
+ MARGIN = 2
+ REQUEST_TIMEOUT = SANDBOX_TIMEOUT - MARGIN
+ ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN
+
+ async with semaphore:
+ try:
+ sandbox = await AsyncSandbox.create(timeout=SANDBOX_TIMEOUT, request_timeout=REQUEST_TIMEOUT)
+ execution = await asyncio.wait_for(
+ sandbox.run_code(script, languages=languages),
+ timeout=ASYNCIO_TIMEOUT,
+ )
+ return float(execution.text)
+ except (TypeError, ValueError):
+ return 0.0
+ except asyncio.TimeoutError:
+ print("Operation timed out")
+ return 0.0
+ except Exception as e:
+ print(f"Error in `_run_script` from E2B sandbox ID {sandbox.sandbox_id} : {e}")
+ return 0.0
+ finally:
+ try:
+ await sandbox.kill()
+ except Exception as e:
+ print(f"Error from E2B executor kill with sandbox ID {sandbox.sandbox_id} : {e}")
+
+
+class MorphProvider(CodeExecutionProvider):
+ """Provider that executes code using MorphCloud's Sandbox API."""
+
+ def __init__(self, num_parallel: int = 2, morph_router_url: Optional[str] = None):
+ """Initialize the Morph provider.
+
+ Args:
+ num_parallel: Number of parallel executions to use
+ morph_router_url: URL for the MorphCloud router (if using router mode)
+ """
+ if not is_morph_available():
+ raise ImportError(
+ "MorphCloud is not available and required for this provider. Please install MorphCloud with "
+ "`pip install morphcloud` and add an API key to a `.env` file."
+ )
+
+ try:
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ except ImportError:
+ print("Warning: python-dotenv not installed. Environment variables must be set directly.")
+
+ self.num_parallel = num_parallel
+ self.morph_router_url = morph_router_url
+
+ if self.morph_router_url is not None:
+ self.routed_sandbox = RoutedMorphSandbox(router_url=self.morph_router_url)
+ return
+
+ import os
+
+ self.api_key = os.getenv("MORPH_API_KEY")
+ if not self.api_key:
+ raise ValueError("MorphCloud API key not found. Please set the MORPH_API_KEY environment variable.")
+
+ try:
+ self.client = MorphCloudClient(api_key=self.api_key)
+ self.Sandbox = Sandbox
+ except ImportError as e:
+ raise ImportError(f"Required MorphCloud dependencies not installed: {e}")
+
+ def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]:
+ """Execute scripts using MorphCloud Sandbox API.
+
+ Args:
+ scripts: List of Python scripts to execute
+            languages: List of programming languages, one per script
+
+ Returns:
+ List of float rewards (one per script)
+ """
+
+ if hasattr(self, "routed_sandbox"):
+ try:
+ results = self.routed_sandbox.run_code(
+ scripts=scripts,
+ languages=languages,
+ timeout=90,
+ request_timeout=96,
+ )
+
+ rewards = []
+ for result in results:
+ try:
+ reward = float(result.text)
+ rewards.append(reward)
+ except (ValueError, AttributeError):
+ rewards.append(0.0)
+ return rewards
+ except Exception as e:
+ print(f"Error from MorphCloud router: {e}")
+ return [0.0] * len(scripts)
+
+ import asyncio
+
+ try:
+ rewards = asyncio.run(self._run_async(scripts, languages, self.num_parallel))
+ except Exception as e:
+ print(f"Error from MorphCloud executor: {e}")
+ rewards = [0.0] * len(scripts)
+
+ return rewards
+
+ async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]:
+ """Run multiple scripts concurrently with limited parallelism.
+
+ Args:
+ scripts: List of scripts to execute
+            languages: List of programming languages
+ num_parallel: Maximum number of concurrent executions
+
+ Returns:
+ List of rewards
+ """
+
+ semaphore = asyncio.Semaphore(num_parallel)
+
+ tasks = [self._run_script(script, languages, semaphore) for script in scripts]
+
+ results = await asyncio.gather(*tasks)
+
+ return list(results)
+
+ async def _run_script(self, script: str, languages: List[str], semaphore: asyncio.Semaphore) -> float:
+ """Execute a single script in a MorphCloud Sandbox.
+
+ Args:
+ script: The script to execute
+            languages: List of programming languages
+ semaphore: Semaphore to limit concurrency
+
+ Returns:
+ Float reward from script execution
+ """
+ SANDBOX_TIMEOUT = 90
+ MARGIN = 6
+ ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN
+
+ sandbox = None
+ async with semaphore:
+ try:
+ sandbox = await asyncio.to_thread(self.Sandbox.new, client=self.client, ttl_seconds=SANDBOX_TIMEOUT)
+ result = await asyncio.wait_for(
+ asyncio.to_thread(
+ sandbox.run_code,
+ script,
+ languages=languages,
+ timeout=SANDBOX_TIMEOUT,
+ ),
+ timeout=ASYNCIO_TIMEOUT,
+ )
+
+ reward = 0.0
+ try:
+ if hasattr(result, "text") and result.text:
+ lines = result.text.strip().split("\n")
+ if lines:
+ try:
+ reward = float(lines[-1])
+ except ValueError:
+ try:
+ reward = float(result.text.strip())
+ except ValueError:
+ pass
+ elif hasattr(result, "stdout") and result.stdout:
+ lines = result.stdout.strip().split("\n")
+ if lines:
+ try:
+ reward = float(lines[-1])
+ except ValueError:
+ pass
+ except (ValueError, AttributeError):
+ pass
+
+ return reward
+
+ except asyncio.TimeoutError:
+ return 0.0
+ except Exception:
+ return 0.0
+ finally:
+ if sandbox:
+ try:
+ await asyncio.to_thread(sandbox.close)
+ await asyncio.to_thread(sandbox.shutdown)
+ except Exception:
+ pass
+
+
+def get_provider(provider_type: str = "e2b", **kwargs) -> CodeExecutionProvider:
+ """Factory function to get the appropriate code execution provider.
+
+ Args:
+ provider_type: Type of provider to use ("e2b", "morph")
+ **kwargs: Additional arguments to pass to the provider
+
+ Returns:
+ An instance of CodeExecutionProvider
+ """
+ num_parallel = kwargs.pop("num_parallel", 2)
+
+ if provider_type == "e2b":
+ # Extract E2B-specific arguments
+ e2b_router_url = kwargs.pop("e2b_router_url", None)
+ return E2BProvider(
+ num_parallel=num_parallel,
+ e2b_router_url=e2b_router_url,
+ )
+ elif provider_type == "morph":
+ # Extract Morph-specific arguments
+ morph_router_url = kwargs.pop("morph_router_url", None)
+ return MorphProvider(
+ num_parallel=num_parallel,
+ morph_router_url=morph_router_url,
+ )
+ else:
+ raise ValueError(f"Unknown provider type: {provider_type}")
diff --git a/src/open_r1/utils/competitive_programming/__init__.py b/src/open_r1/utils/competitive_programming/__init__.py
new file mode 100644
index 000000000..081e16fea
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/__init__.py
@@ -0,0 +1,19 @@
+from .cf_scoring import score_submission
+from .code_patcher import patch_code
+from .ioi_scoring import SubtaskResult, score_subtask, score_subtasks
+from .ioi_utils import add_includes
+from .morph_client import get_morph_client_from_env
+from .piston_client import get_piston_client_from_env, get_slurm_piston_endpoints
+
+
+__all__ = [
+ "get_piston_client_from_env",
+ "get_slurm_piston_endpoints",
+ "get_morph_client_from_env",
+ "patch_code",
+ "score_submission",
+ "score_subtask",
+ "score_subtasks",
+ "add_includes",
+ "SubtaskResult",
+]
diff --git a/src/open_r1/utils/competitive_programming/cf_scoring.py b/src/open_r1/utils/competitive_programming/cf_scoring.py
new file mode 100644
index 000000000..d3ede4f7e
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/cf_scoring.py
@@ -0,0 +1,146 @@
+import asyncio
+import os
+from io import BytesIO
+from typing import Literal
+
+from async_lru import alru_cache
+
+from .piston_client import PistonClient
+from .utils import batched
+
+
+async def score_single_test_case(
+ client: PistonClient,
+ problem_data: dict,
+ test_input: str,
+ test_output: str,
+ submission: str,
+ submission_language: str = "cpp",
+) -> tuple[str, str]:
+ if submission_language not in ["python", "cpp"]:
+ raise ValueError(f"Invalid submission language: {submission_language}")
+ try:
+ result = await client.send_execute(
+ {
+ "files": [
+ {"name": f"main.{submission_language}", "content": submission},
+ *(
+ [{"name": "checker.py", "content": problem_data["generated_checker"]}]
+ if problem_data["generated_checker"]
+ else []
+ ),
+ {"name": "input.txt", "content": test_input},
+ {"name": "correct_output.txt", "content": test_output},
+ {
+ "name": "grader_config",
+ "content": "\n".join(
+ f"{key}={value}"
+ for key, value in {
+ "TIME_LIMIT": problem_data["time_limit"],
+ "MEMORY_LIMIT": problem_data["memory_limit"],
+ "INPUT_MODE": problem_data["input_mode"],
+ }.items()
+ ),
+ },
+ ],
+ "run_timeout": (problem_data["time_limit"] + 10) * 1000,
+ # +10 seconds hard limit. time limits are handled by the codeforces script
+ },
+ language="cf_python3" if submission_language == "python" else "c++17",
+ )
+ except Exception as e:
+ print(f"Error scoring submission: {e}")
+ return False
+
+ return result
+
+
+@alru_cache(maxsize=32) # TODO make this configurable
+async def get_generated_contest_tests(contest_id: str) -> dict[str, list[dict]]:
+ import pandas as pd
+
+ import aiofiles
+ import aiofiles.os
+
+ tests_folder = os.environ.get("CF_TESTS_FOLDER", None)
+ if not tests_folder:
+ raise ValueError(
+ "CF_TESTS_FOLDER environment variable not set! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information."
+ )
+ if not await aiofiles.os.path.exists(tests_folder):
+ raise ValueError(
+ f"CF_TESTS_FOLDER path '{tests_folder}' does not exist! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information."
+ )
+ parquet_path = os.path.join(tests_folder, f"test_cases_{int(contest_id):04d}.parquet")
+ if not await aiofiles.os.path.exists(parquet_path):
+ return {}
+
+ # Read parquet file asynchronously
+ async with aiofiles.open(parquet_path, "rb") as f:
+ content = await f.read()
+ df = pd.read_parquet(BytesIO(content))
+
+ # Group by problem_id and convert to dictionary of lists
+ grouped_tests = df.groupby("problem_id").apply(lambda x: x[["input", "output"]].to_dict("records")).to_dict()
+
+ return grouped_tests
+
+
+async def get_generated_tests(problem_id: str) -> list[dict]:
+ contest_id = problem_id.split("/")[0]
+ return (await get_generated_contest_tests(contest_id)).get(problem_id, [])
+
+
+async def score_submission(
+ client: PistonClient,
+ problem_data: dict,
+ submission: str,
+ test_batch_size: int = 1,
+ scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = "weighted_sum",
+ no_compile_reward: float = -0.1,
+ no_submission_reward: float = -1.0,
+ submission_language: str = "cpp",
+) -> float:
+ if submission_language not in ["python", "cpp"]:
+ raise ValueError(f"Invalid submission language: {submission_language}")
+ test_cases = problem_data["official_tests"] + (await get_generated_tests(problem_data["id"]))
+ # invalid/not a coding problem
+ if test_cases is None or len(test_cases) == 0:
+ return None
+ # no code extracted
+ if not submission:
+ return no_submission_reward
+
+ passed_test_cases = 0
+    # run one batch and check if any test failed (0 score): if so, stop evaluating (assuming non-partial scoring); otherwise continue with the next batch of test cases.
+ for test_batch_to_run in batched(test_cases, test_batch_size) if test_batch_size >= 1 else [test_cases]:
+ results = await asyncio.gather(
+ *[
+ asyncio.create_task(
+ score_single_test_case(
+ client, problem_data, test_case["input"], test_case["output"], submission, submission_language
+ )
+ )
+ for test_case in test_batch_to_run
+ ]
+ )
+ if any(result and result["compile"]["code"] != 0 for result in results):
+ return no_compile_reward
+
+ tests_passed_results = [
+ result and result["run"]["code"] == 0 and result["run"]["stdout"].strip() == "1" for result in results
+ ]
+ if scoring_mode == "pass_fail" and any(not test_passed for test_passed in tests_passed_results):
+ break
+ passed_test_cases += sum(1 for test_passed in tests_passed_results if test_passed)
+
+ pass_fail_score = 1.0 if passed_test_cases == len(test_cases) else 0.0
+
+ if scoring_mode == "pass_fail":
+ return pass_fail_score
+ elif scoring_mode == "partial":
+ return passed_test_cases / len(test_cases)
+ elif scoring_mode == "weighted_sum":
+ return pass_fail_score + 0.1 * (passed_test_cases / len(test_cases))
+ else:
+ raise ValueError(f"Invalid scoring mode: {scoring_mode}")
diff --git a/src/open_r1/utils/competitive_programming/code_patcher.py b/src/open_r1/utils/competitive_programming/code_patcher.py
new file mode 100644
index 000000000..4d5536020
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/code_patcher.py
@@ -0,0 +1,123 @@
+import re
+
+
+def fix_python3_imports(source_code):
+ """
+ Fix common import and function changes between Python 3 versions
+
+ Args:
+ source_code (str): The Python source code to update
+
+ Returns:
+ str: The updated source code
+ """
+ # Dictionary of patterns to replacements
+ replacements = [
+ # Fix collections.abc imports (changed in Python 3.3+)
+ (
+ r"from collections import (Mapping|Sequence|Set|Container|MutableMapping|MutableSet|MutableSequence)",
+ r"from collections.abc import \1",
+ ),
+ # Fix imp module deprecation (deprecated in 3.4)
+ (r"import imp", r"import importlib"),
+ # Fix asyncio.async() to asyncio.ensure_future() (renamed in 3.4.4)
+ (r"asyncio\.async\(", r"asyncio.ensure_future("),
+ # Fix inspect.getargspec to inspect.getfullargspec (deprecated in 3.5)
+ (r"inspect\.getargspec", r"inspect.getfullargspec"),
+ # Fix array.array 'c' type code to 'b' (removed in 3.9)
+ (r"array\.array\('c'", r"array.array('b'"),
+ # Fix backslash line continuation with multiple newlines (Python-specific issue)
+ (r"\\(\r\n|\r|\n)+", "\\\n"),
+ # some solutions use getlogin() to check if they are debugging or on an actual submission
+ (r"(?:os\s*\.\s*)?getlogin\s*\(\s*\)", "False"),
+ # Fix usage of fractions.gcd (moved to math in 3.5)
+ # 1. Fix direct usage: fractions.gcd -> math.gcd
+ (r"\bfractions\.gcd\b", r"math.gcd"),
+ # 2. Fix 'from fractions import gcd, X' -> 'from fractions import X' (start/middle)
+ (r"(from\s+fractions\s+import\s+(?:\([^)]*)?)\bgcd\s*,\s*", r"\1"),
+ # 3. Fix 'from fractions import X, gcd' -> 'from fractions import X' (end)
+ (r"(from\s+fractions\s+import\s+.*?\S)\s*,\s*\bgcd(\s*\)?\s*(?:#.*)?)", r"\1\2"),
+ # 4. Fix standalone 'from fractions import gcd' -> 'from math import gcd'
+ (r"from\s+fractions\s+import\s+\(?\s*gcd\s*\)?", r""),
+ # --- End: Replacement for the faulty line ---
+ ]
+
+ lines = source_code.splitlines()
+ last_import = max(
+ [
+ i
+ for i, line in enumerate(lines)
+ if line.strip().startswith("import") or (line.strip().startswith("from") and "import" in line)
+ ],
+ default=0,
+ )
+ import_section = "\n".join(lines[: last_import + 1])
+ main_source = "\n".join(lines[last_import:])
+
+ if "fractions.gcd" in source_code and "import math" not in source_code:
+ import_section += "\nimport math"
+ elif "gcd" in source_code and "from math import gcd" not in source_code:
+ import_section += "\nfrom math import gcd"
+
+ if "set_int_max_str_digits" not in source_code:
+ import_section += "\nimport sys\nsys.set_int_max_str_digits(0)"
+
+ source_code = import_section + "\n" + main_source
+
+ # Apply each replacement
+ for pattern, replacement in replacements:
+ source_code = re.sub(pattern, replacement, source_code)
+
+ source_code = source_code.rstrip("\\")
+
+ return source_code
+
+
+def fix_cpp_includes(source_code):
+ # has most of the useful functions
+ code_header = "#include \n"
+ # use namespace std since models forget std:: often
+ if "using namespace std;" not in source_code and "std::" not in source_code:
+ code_header += "\nusing namespace std;\n\n"
+ return code_header + source_code
+
+
+def is_patchable(lang):
+ return lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64", "cpp") or "C++" in lang
+
+
+def patch_code(text, lang):
+ if not text:
+ return text
+ if lang in ("python", "python3", "Python 3", "PyPy 3", "PyPy 3-64"):
+ return fix_python3_imports(text)
+ elif "cpp" in lang or "C++" in lang:
+ return fix_cpp_includes(text)
+ return text
+
+
+tests = [
+ """read = lambda: map(int, input().split())
+n, m, z = read()
+from fractions import gcd
+ans = z // (n * m // gcd(n, m))
+print(ans)""",
+ """from fractions import Fraction,gcd
+
+a,b,c,d = [int(x) for x in input().split()]
+
+if a*d > b*c:
+ num = a*d-b*c
+ denom = a*d
+else:
+ num = b*c-a*d
+ denom = b*c
+div = gcd(num,denom)
+print('%d/%d'%(num//div,denom//div))""",
+]
+
+if __name__ == "__main__":
+ for test in tests:
+ print("ORIGINAL:", test, sep="\n\n")
+ print("PATCHED:", patch_code(test, "Python 3"), sep="\n\n")
+ print("=" * 50)
diff --git a/src/open_r1/utils/competitive_programming/ioi_scoring.py b/src/open_r1/utils/competitive_programming/ioi_scoring.py
new file mode 100644
index 000000000..357156c89
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/ioi_scoring.py
@@ -0,0 +1,335 @@
+import asyncio
+from dataclasses import asdict, dataclass, field
+from typing import Union
+
+from .ioi_utils import load_ioi_tests
+from .piston_client import PistonClient, PistonError
+from .utils import batched
+
+
+@dataclass
+class TestResult:
+ """
+ Represents the result of a single test case execution.
+
+ Attributes:
+ test_name: Name of the test case
+ score: Score achieved for this test (0.0 to 1.0)
+ status: Status code of the test result (e.g., 'AC', 'WA', 'TLE')
+ feedback: Detailed feedback message from the judge or an error message
+ """
+
+ test_name: str
+ score: float = 0.0
+ status: str = "SKIPPED"
+ feedback: str = None
+
+
+@dataclass
+class SubtaskResult:
+ """
+ Represents the result of a subtask containing multiple test cases.
+
+ Attributes:
+ problem: Problem identifier
+ subtask: Subtask identifier
+ points: Maximum points available for this subtask
+ score_precision: Number of decimal places for score rounding
+ test_results: List of individual test case results
+ """
+
+ problem: str = None
+ subtask: str = None
+
+ points: float = 0.0
+ score_precision: int = 2
+
+ test_results: list[TestResult] = field(default_factory=list)
+
+ @property
+ def status(self):
+ """
+ Determines the overall status of the subtask based on the worst status among test results.
+ Status priorities are ordered from worst to best.
+
+ Returns:
+ str: The status with the highest priority (lowest value)
+ """
+ status_prios = {"CE": -1, "RE": 0, "WA": 1, "MLE": 2, "TLE": 3, "PA": 4, "AC": 5, "SKIPPED": 999}
+ return min([x.status for x in self.test_results], key=lambda x: status_prios[x])
+
+ @property
+ def score(self):
+ """
+ Calculates the raw score for the subtask as the minimum score across all test results.
+
+ Returns:
+ float: The rounded minimum score
+ """
+ return (
+ 0
+ if not self.test_results
+ else round(min([test_result.score for test_result in self.test_results]), self.score_precision)
+ )
+
+ @property
+ def weighted_score(self):
+ """
+ Calculates the weighted score by multiplying the raw score by the available points.
+
+ Returns:
+ float: The rounded weighted score
+ """
+ return (
+ 0
+ if not self.test_results
+ else round(
+ min([test_result.score for test_result in self.test_results]) * self.points, self.score_precision
+ )
+ )
+
+ def to_dict(self):
+ """
+ Converts the SubtaskResult to a dictionary representation.
+
+ Returns:
+ dict: Dictionary containing all subtask result data
+ """
+ return {
+ "problem": self.problem,
+ "subtask": self.subtask,
+ "score": self.score,
+ "weighted_score": self.weighted_score,
+ "points": self.points,
+ "score_precision": self.score_precision,
+ "status": self.status,
+ "test_results": [asdict(test_result) for test_result in self.test_results],
+ }
+
+
+def _extract_single_status(score: float, feedback: str) -> str:
+ """
+ Determines the status code based on the score and feedback message.
+
+ Args:
+ score: The numeric score (0.0 to 1.0)
+ feedback: The feedback message from the execution
+
+ Returns:
+ str: Status code ('CE', 'MLE', 'TLE', 'WA', 'RE', 'AC', or 'PA')
+ """
+ if score == 0.0:
+ if "Compilation error" in feedback:
+ return "CE"
+ elif "Memory limit exceeded" in feedback:
+ return "MLE"
+ elif "Time limit exceeded" in feedback:
+ return "TLE"
+ elif "Output isn't correct" in feedback:
+ return "WA"
+ else:
+ return "RE"
+ elif score == 1.0:
+ return "AC"
+ else:
+ return "PA"
+
+
+async def score_single_test_case(
+ client: PistonClient, subtask: dict, test_name: str, test_input: str, test_output: str, submission: str
+) -> TestResult:
+ """
+ Scores a single test case by running the submission against the provided input and output.
+
+ Args:
+ client: PistonClient instance for executing code
+ subtask: Dictionary containing subtask configuration
+ test_name: Name of the test case
+ test_input: Input data for the test case
+ test_output: Expected output for the test case
+ submission: Source code of the submission
+
+ Returns:
+ TestResult: Result of the test case execution
+ """
+ # Run submission for this test case
+ score, feedback = await run_submission(client, subtask, test_input, submission, test_output)
+ score = float(score)
+
+ return TestResult(
+ test_name=test_name, score=score, status=_extract_single_status(score, feedback), feedback=feedback
+ )
+
+
+async def score_subtask(
+ client: PistonClient,
+ subtask: dict,
+ submission: str,
+ test_case_run_cache: Union[dict, None] = None,
+ test_batch_size: int = 1,
+) -> SubtaskResult:
+ """
+ Scores all test cases in a subtask.
+
+ Args:
+ client: PistonClient instance for executing code
+        subtask: Dictionary containing the subtask configuration, including test names and (optionally) test cases
+        submission: Source code of the submission
+        test_case_run_cache: Optional cache of previously run test cases
+        test_batch_size: evaluate this many test cases in parallel, then check whether any of them failed (0 score): if so, stop evaluating; otherwise continue with the next batch.
+            Pass -1 to evaluate all test cases in parallel.
+ Returns:
+ SubtaskResult: Result of the subtask evaluation
+ """
+ subtask_result = SubtaskResult(
+ problem=subtask["id"],
+ subtask=subtask["subtask"],
+ points=subtask["score"],
+ score_precision=subtask["score_precision"],
+ test_results=[],
+ )
+
+ # tests that are not cached
+ tests_to_run = [
+ (ti, test_name)
+ for ti, test_name in enumerate(subtask["test_names"])
+ if test_case_run_cache is None or test_name not in test_case_run_cache
+ ]
+
+ # initialize test results with cached results or empty (SKIPPED) TestResult objects
+ subtask_result.test_results = [
+ test_case_run_cache[test_name]
+ if test_case_run_cache is not None and test_name in test_case_run_cache
+ else TestResult(test_name=test_name)
+ for test_name in subtask["test_names"]
+ ]
+
+    # skip early if no code was extracted, or if a cached test in this subtask already failed
+    # (the subtask score is the minimum over its tests, so it would be 0 regardless)
+ if not submission or any(
+ test_result.status != "SKIPPED" and test_result.score == 0.0 for test_result in subtask_result.test_results
+ ):
+ return subtask_result
+
+ if "test_cases" in subtask:
+ test_cases = subtask["test_cases"]
+ if isinstance(subtask["test_cases"], list):
+ test_cases = {test_name: test for test_name, test in zip(subtask["test_names"], subtask["test_cases"])}
+ else:
+ test_cases = load_ioi_tests(subtask["year"], subtask["id"])
+
+ # run one batch, check if any of them failed (0 score): if so stop evaluating; otherwise continue with the next batch of test cases.
+ for test_batch_to_run in batched(tests_to_run, test_batch_size):
+ results = await asyncio.gather(
+ *[
+ asyncio.create_task(
+ score_single_test_case(
+ client, subtask, test_name, test_cases[test_name][0], test_cases[test_name][1], submission
+ )
+ )
+ for _, test_name in test_batch_to_run
+ ]
+ )
+ for (ti, test_name), test_result in zip(test_batch_to_run, results):
+ if test_case_run_cache is not None:
+ test_case_run_cache[test_name] = test_result
+ subtask_result.test_results[ti] = test_result
+
+ # Stop early if it failed
+ if any(test_result.score == 0.0 for test_result in results):
+ break
+
+ return subtask_result
+
+
+async def score_subtasks(
+ client: PistonClient, subtasks: list[dict], submission: str, skip_mode: bool = True
+) -> list[SubtaskResult]:
+ """
+ Scores multiple subtasks for a submission.
+
+ Args:
+ client: PistonClient instance for executing code
+ subtasks: List of dictionaries containing subtask configurations
+ submission: Source code of the submission
+ skip_mode: If True, evaluates test by test and stops after the first failure. Otherwise, runs all tests in parallel. Should be True when evaluating a large number of submissions.
+
+ Returns:
+ list[SubtaskResult]: Results for all subtasks
+ """
+ # avoid rerunning tests present in multiple subtasks
+ test_case_run_cache = {}
+
+    return [
+        await score_subtask(client, subtask, submission, test_case_run_cache, test_batch_size=1 if skip_mode else -1)
+        for subtask in subtasks
+    ]
+
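+# Illustrative usage sketch (not invoked in this module; `problem` and `extracted_code` are
+# placeholders supplied by the caller):
+#
+#     client = get_piston_client_from_env()  # from .piston_client
+#     results = await score_subtasks(client, problem["subtasks"], extracted_code)
+#     total_score = sum(result.weighted_score for result in results)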
+
+async def run_submission(
+ client: PistonClient, problem: dict, test_input: str, submission: str, test_output: str | None = None
+) -> tuple[str, str]:
+ """
+ Executes a submission against a test case using the Piston execution environment.
+
+ Args:
+ client: PistonClient instance for executing code
+ problem: Dictionary containing problem configuration
+ test_input: Input data for the test case
+ submission: Source code of the submission
+ test_output: Optional expected output for the test case
+
+ Returns:
+ tuple[str, str]: A tuple containing (score, feedback)
+ """
+ data = {
+ "files": [
+ # the actual submission
+ {"name": f"graders/{problem['id'].lower()}.cpp", "content": submission},
+ # pass the input
+ {"name": "input.txt", "content": test_input},
+ # pass the expected output
+ *([{"name": "correct_output.txt", "content": test_output}] if test_output else []),
+ # grader files
+ *({"name": name, "content": content} for name, content in problem["grader_files"] if content),
+ ],
+ "run_timeout": round(
+ (problem["time_limit"] + 3) * 1000
+ ), # +3 seconds hard limit. time limits are handled by the ioi script
+ "run_memory_limit": problem["memory_limit"],
+ }
+ return await execute_ioi(client, data)
+
+
+async def execute_ioi(client, data) -> tuple[str, str]:
+ """
+    Sends the request to Piston's IOI package, which reports the score as a float on stdout and any feedback/errors on stderr.
+ Returns a tuple of (score, feedback).
+ """
+ response = await client.send_execute(data)
+
+ if "message" in response:
+ raise PistonError(response["message"])
+
+ if "compile" in response and response["compile"]["code"] != 0:
+ return "0", "Compilation error exit code " + str(response["compile"]["code"]) + "\n" + response["compile"][
+ "stderr"
+ ]
+
+ if "run" not in response:
+ raise PistonError(response)
+
+ if response["run"]["code"] == 1 and "MemoryError" in response["run"]["stderr"]:
+ return "0", "Memory limit exceeded"
+
+ # successful result
+ if response["run"]["stdout"]:
+ return response["run"]["stdout"], response["run"]["stderr"]
+
+ if response["run"]["signal"] == "SIGKILL":
+ return "0", "Time limit exceeded"
+
+ # other issues
+ if response["run"]["code"] != 0:
+ raise PistonError(
+ f"language={response['language']}, version={response['version']}, exit code={response['run']['code']}, stderr={response['run']['stderr']}, signal={response['run']['signal']}"
+ )
+ return "0", "Unknown error"
diff --git a/src/open_r1/utils/competitive_programming/ioi_utils.py b/src/open_r1/utils/competitive_programming/ioi_utils.py
new file mode 100644
index 000000000..02fe2b39b
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/ioi_utils.py
@@ -0,0 +1,41 @@
+from collections import defaultdict
+from functools import lru_cache
+
+from datasets import load_dataset
+
+
+def add_includes(code: str, problem_id: str) -> str:
+ """
+ Fix common compilation errors for IOI problems.
+ """
+ if not code:
+ return code
+ # has most of the useful functions
+ code_header = "#include \n"
+ # include the problem header
+ problem_header_include = f'#include "{problem_id}.h"'
+ if problem_header_include not in code:
+ code_header += problem_header_include + "\n"
+ # use namespace std since models forget std:: often
+ if "using namespace std;" not in code and "std::" not in code:
+ code_header += "\nusing namespace std;\n\n"
+ return code_header + code
+
+
+@lru_cache
+def load_ioi_tests_for_year(year: int) -> dict[str, dict[str, tuple[str, str]]]:
+ """
+ Load IOI tests for a given year.
+ """
+ tests_dataset = load_dataset("open-r1/ioi-test-cases", name=f"{year}", split="train")
+ test_cases = defaultdict(dict)
+ for test_case in tests_dataset:
+ test_cases[test_case["problem_id"]][test_case["test_name"]] = test_case["test_input"], test_case["test_output"]
+ return test_cases
+
+
+def load_ioi_tests(year: int, problem_id: str) -> dict[str, tuple[str, str]]:
+ """
+ Load IOI tests for a given year and problem id.
+ """
+ return load_ioi_tests_for_year(year)[problem_id]
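+
+
+# Illustrative usage sketch (problem and test names below are placeholders, not real dataset entries):
+#   test_input, test_output = load_ioi_tests(2024, "some_problem")["some_test"]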
diff --git a/src/open_r1/utils/competitive_programming/morph_client.py b/src/open_r1/utils/competitive_programming/morph_client.py
new file mode 100644
index 000000000..559b7f8a2
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/morph_client.py
@@ -0,0 +1,742 @@
+import asyncio
+import json
+import logging
+import os
+import tempfile
+from typing import Any, Dict, Optional, Tuple
+
+from dotenv import load_dotenv
+from open_r1.utils.import_utils import is_morph_available
+
+
+# Import MorphCloud conditionally so this module can be imported without the optional dependency installed
+if is_morph_available():
+ from morphcloud.api import Instance, InstanceExecResponse, MorphCloudClient
+else:
+ Instance = None
+ InstanceExecResponse = None
+ MorphCloudClient = None
+
+
+# Silence verbose logs from dependencies
+logging.getLogger("paramiko").setLevel(logging.ERROR)
+logging.getLogger("httpx").setLevel(logging.ERROR)
+
+
+class MorphCloudError(Exception):
+ pass
+
+
+class MorphCloudExecutionClient:
+ def __init__(
+ self,
+ api_key: Optional[str] = None,
+ base_url: Optional[str] = None,
+ spans_log_path: Optional[str] = None,
+ ):
+ """
+ Initialize the MorphCloud execution client.
+
+ Args:
+ api_key: Optional API key for MorphCloud. If not provided, will use MORPH_API_KEY env var.
+ base_url: Optional base URL for MorphCloud API. If not provided, will use default.
+            spans_log_path: Optional path for logging API call spans (not currently used by this client).
+ """
+
+ self.client = MorphCloudClient(api_key=api_key, base_url=base_url)
+ self._snapshot_lock = asyncio.Lock()
+
+ async def _prepare_instance(self, snapshot_id=None) -> Instance:
+ """
+ Prepare and start a MorphCloud instance.
+
+ Args:
+ snapshot_id: Optional snapshot ID to use. If None, will get or create base snapshot.
+
+ Returns:
+ Instance: The ready-to-use MorphCloud instance
+
+ Raises:
+ TimeoutError: If instance fails to start or become ready
+ """
+
+ if not snapshot_id:
+ snapshot = await self._get_or_create_base_snapshot()
+ snapshot_id = snapshot.id
+
+        instance = None
+        try:
+ instance = await self.client.instances.astart(
+ snapshot_id, ttl_seconds=600
+ ) # Auto-terminate after 10 minutes
+ await instance.await_until_ready(timeout=300)
+ return instance
+ except asyncio.TimeoutError as e:
+ print(f"Timeout while preparing instance: {str(e)}")
+ if instance:
+ try:
+ await instance.astop()
+ except Exception:
+ pass
+ raise
+
+ async def _prepare_files(self, data: Dict[str, Any], temp_dir: str) -> Tuple[str, Dict[str, Any], Dict[str, str]]:
+ """
+ Process files, determine problem ID, and prepare configuration.
+
+ Args:
+ data: Dictionary containing file information
+ temp_dir: Local temporary directory for file operations
+
+ Returns:
+ tuple: (problem_id, grader_config, local_files)
+
+ Raises:
+ ValueError: If problem ID cannot be determined
+ """
+ # Extract problem ID
+ problem_id = None
+ graders_files = []
+ for file in data["files"]:
+ if file["name"].startswith("graders/") and file["name"].endswith(".cpp"):
+ potential_id = os.path.basename(file["name"]).split(".")[0]
+ if potential_id not in ["grader", "manager", "stub"]:
+ problem_id = potential_id
+
+ if file["name"].startswith("graders/"):
+ graders_files.append(file)
+
+ if not problem_id:
+ raise ValueError("Could not determine problem ID from files")
+
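+        # run_timeout is given in milliseconds and run_memory_limit in MB (see `execute`),
+        # so convert to the seconds/bytes expected by the grader config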
+ grader_config = {
+ "task_type": "Batch",
+ "code": problem_id,
+ "time_limit": data["run_timeout"] / 1000,
+ "memory_limit": data["run_memory_limit"] * 1024 * 1024,
+ }
+
+ for file in graders_files:
+ if "manager.cpp" in file["name"]:
+ grader_config["task_type"] = "Communication"
+ grader_config["task_type_parameters_Communication_num_processes"] = 1
+ grader_config["task_type_parameters_Communication_user_io"] = "std_io"
+ break
+
+ config_path = os.path.join(temp_dir, "grader_config.json")
+ with open(config_path, "w") as f:
+ json.dump(grader_config, f)
+
+ local_files = {"grader_config.json": config_path}
+
+ for file in data["files"]:
+ local_path = os.path.join(temp_dir, os.path.basename(file["name"]))
+ with open(local_path, "w") as f:
+ f.write(file["content"])
+ local_files[file["name"]] = local_path
+
+ return problem_id, grader_config, local_files
+
+ async def _upload_files(self, instance: Instance, local_files: Dict[str, str]) -> bool:
+ """
+ Upload all necessary files to the instance.
+
+ Args:
+ instance: The MorphCloud instance
+ local_files: Dictionary mapping remote paths to local file paths
+
+ Returns:
+ bool: True if all uploads were successful
+
+ Raises:
+ TimeoutError: If uploads time out
+ """
+ for remote_name, local_path in local_files.items():
+ target_path = f"/workspace/{remote_name}"
+ dir_path = os.path.dirname(target_path)
+
+ if dir_path != "/workspace":
+ await instance.aexec(f"mkdir -p {dir_path}")
+
+ await instance.aupload(local_path, target_path)
+
+ await instance.aupload(local_files["grader_config.json"], "/workspace/graders/grader_config.json")
+
+ return True
+
+ async def _compile_code(self, instance: Instance) -> InstanceExecResponse:
+ """
+ Compile the code on the instance.
+
+ Args:
+ instance: The MorphCloud instance
+
+ Returns:
+ InstanceExecResponse: Result of compilation
+
+ Raises:
+ RuntimeError: If compilation fails
+ """
+ compile_result = await instance.aexec("cd /workspace && ./compile")
+
+ if compile_result.exit_code != 0:
+ raise RuntimeError(f"Compilation error exit code {compile_result.exit_code}\n{compile_result.stderr}")
+
+ return compile_result
+
+ async def _run_tests(self, instance: Instance, data: Dict[str, Any]) -> Tuple[str, str]:
+ """
+ Run tests and evaluate results.
+
+ Args:
+ instance: The MorphCloud instance
+ data: Dictionary containing runtime parameters
+
+ Returns:
+ tuple: (score, feedback)
+
+ Raises:
+ TimeoutError: If test execution times out
+ """
+ hard_timeout = data["run_timeout"] / 1000 + 3
+ run_command = f"cd /workspace && timeout {hard_timeout}s ./run"
+
+ run_result = await instance.aexec(run_command)
+
+ if run_result.exit_code == 124 or run_result.exit_code == 137 or run_result.exit_code == 143:
+ return "0", "Time limit exceeded"
+
+ if run_result.exit_code != 0 and "Memory limit exceeded" in run_result.stderr:
+ return "0", "Memory limit exceeded"
+
+ if run_result.stdout:
+ return run_result.stdout.strip(), run_result.stderr.strip()
+
+ if run_result.exit_code != 0:
+ return (
+ "0",
+ f"Runtime error with exit code {run_result.exit_code}\n{run_result.stderr}",
+ )
+
+ return "0", "Unknown error"
+
+ async def _execute_with_instance(self, instance: Instance, data: Dict[str, Any], temp_dir: str) -> Tuple[str, str]:
+ """Execute code using a prepared instance.
+
+ Args:
+ instance: Ready MorphCloud instance
+ data: Execution data
+ temp_dir: Temporary directory for file operations
+
+ Returns:
+ Tuple of (score, feedback)
+
+ Raises:
+ Exception: Passes through exceptions for retry handling
+ """
+ await instance.await_until_ready(timeout=300)
+
+ problem_id, grader_config, local_files = await self._prepare_files(data, temp_dir)
+
+ await self._upload_files(instance, local_files)
+
+ try:
+ await self._compile_code(instance)
+ except RuntimeError as e:
+ return "0", str(e)
+
+ score, feedback = await self._run_tests(instance, data)
+ return score, feedback
+
+ async def _execute(self, data: Dict[str, Any]) -> Tuple[str, str]:
+ """
+ Internal implementation of execute with no retry logic.
+
+ Args:
+ data: Dictionary containing execution data
+
+ Returns:
+ Tuple of (score, feedback)
+
+ Raises:
+ Exception: If execution fails
+ """
+ instance = None
+
+ # Set timeouts to ensure we don't block indefinitely
+ # INSTANCE_TIMEOUT = 300 # 5 minutes for instance operations
+ TOTAL_EXECUTION_TIMEOUT = 600 # 10 minutes total execution time
+
+ with tempfile.TemporaryDirectory(prefix="morph_exec_") as temp_dir:
+ snapshot = await self._get_or_create_base_snapshot()
+ instance = await self.client.instances.astart(
+ snapshot.id, ttl_seconds=600
+ ) # Auto-terminate after 10 minutes
+
+ async with instance:
+ # Use asyncio.wait_for to add overall timeout to the execution process
+ return await asyncio.wait_for(
+ self._execute_with_instance(instance, data, temp_dir),
+ timeout=TOTAL_EXECUTION_TIMEOUT,
+ )
+
+ async def execute(self, data: Dict[str, Any]) -> Tuple[str, str]:
+ """
+ Execute code on MorphCloud based on the provided data with enhanced debugging and recovery.
+
+        Orchestrates the full pipeline and retries it end-to-end with exponential backoff on failure:
+        1. Prepare an instance
+        2. Set up the workspace and upload files
+        3. Compile the code
+        4. Run the tests
+
+ Args:
+ data: Dictionary containing:
+ - files: List of file objects with name and content fields
+ - run_timeout: Timeout in milliseconds
+ - run_memory_limit: Memory limit in MB
+
+ Returns:
+ Tuple of (score, feedback) where:
+ - score is a string representation of a float between 0.0 and 1.0
+ - feedback is a string with execution details
+ """
+ # TODO: would be faster to pass info about the subtask as well to create a snapshot per subtask
+ # would cache the uploads of all files other than the submission: input.txt, correct_output.txt, grader files
+ # rather than reusing the snapshot that only has the compile/run scripts on it
+ # currently, run_submission -> client.execute(data) does not easily pass subtask info
+
+ # Retry configuration
+ max_retries = 4
+ base_delay = 1.0
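+        # with these settings the delay between attempts grows roughly as 1s, 2s, 4s, 8s (capped at 30s)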
+
+ # Try execution with retries and exponential backoff
+ for attempt in range(max_retries + 1):
+ try:
+ return await self._execute(data)
+
+ except asyncio.TimeoutError:
+ if attempt < max_retries:
+ print(f"Execution timed out, retrying ({attempt + 1}/{max_retries})")
+ else:
+ return "0", "Execution timed out after multiple retries"
+
+ except Exception as e:
+ # Calculate exponential backoff
+ if attempt < max_retries:
+ retry_delay = min(base_delay * (2**attempt), 30) # Exponential backoff, capped at 30 seconds
+
+ print(
+ f"Execution failed with {type(e).__name__}: {str(e)}, retrying in {retry_delay:.2f}s ({attempt + 1}/{max_retries})"
+ )
+ await asyncio.sleep(retry_delay)
+ else:
+ print(f"Execution failed after {max_retries} retries: {type(e).__name__}: {str(e)}")
+ return "0", f"Execution failed after multiple retries: {str(e)}"
+
+ async def _get_or_create_base_snapshot(self):
+ """Get or create a snapshot with the necessary dependencies and scripts for evaluation."""
+
+ async with self._snapshot_lock:
+ base_snapshots = await self.client.snapshots.alist(digest="ioi-evaluation-morph")
+
+ if not base_snapshots:
+ print("Creating base snapshot with build-essential cmake and g++")
+
+ # Create base snapshot with minimal specs
+ base_snapshot = await self.client.snapshots.acreate(
+ vcpus=2,
+ memory=4096,
+ disk_size=10240,
+ metadata={"purpose": "ioi_evaluation"},
+ )
+
+ # Start a temporary instance from the base snapshot
+ temp_instance = await self.client.instances.astart(
+ base_snapshot.id, ttl_seconds=900
+ ) # Auto-terminate after 15 minutes
+
+ try:
+ # Wait for the instance to be ready
+ await temp_instance.await_until_ready(timeout=300)
+
+ # Get script contents
+ compile_script = await self._get_compile_script()
+ run_script = await self._get_run_script()
+
+ # Use temporary directory to store scripts
+ with tempfile.TemporaryDirectory(prefix="morph_setup_") as temp_dir:
+ # Create paths for script files
+ compile_path = os.path.join(temp_dir, "compile.sh")
+ run_path = os.path.join(temp_dir, "run.sh")
+
+ # Write scripts to temp files
+ with open(compile_path, "w") as f:
+ f.write(compile_script)
+
+ with open(run_path, "w") as f:
+ f.write(run_script)
+
+ async with temp_instance:
+ # Install dependencies
+ await temp_instance.aexec("apt-get update && apt-get install -y build-essential cmake g++")
+
+ # Create workspace directory
+ await temp_instance.aexec(
+ "mkdir -p /workspace && mkdir -p /workspace/graders && chmod 777 /workspace"
+ )
+
+ # Upload scripts to instance
+ await temp_instance.aupload(compile_path, "/workspace/compile")
+ await temp_instance.aupload(run_path, "/workspace/run")
+
+ # Make scripts executable
+ await temp_instance.aexec("chmod +x /workspace/compile /workspace/run")
+
+ # Create snapshot from the prepared instance
+ final_snapshot = await temp_instance.asnapshot(digest="ioi-evaluation-morph")
+
+ except Exception as e:
+ # Ensure instance is stopped if anything fails
+ await temp_instance.astop()
+ raise e
+ else:
+ final_snapshot = base_snapshots[0]
+
+ return final_snapshot
+
+ async def _get_compile_script(self):
+ """Get the compile script content."""
+ return """#!/bin/bash
+
+manager_files=() # Array to store manager filenames
+current_dir="$(pwd)"
+
+# Checker compilation path
+checker_dir="$current_dir/checker"
+checker_src="$checker_dir/checker.cpp"
+
+if [ -e "$checker_src" ]; then
+ echo "Compiling checker"
+ checker_exe="$checker_dir/checker"
+ g++ -x c++ -std=gnu++17 -O2 -o "$checker_exe" "$checker_src"
+ chmod +x "$checker_exe"
+ if [ $? -ne 0 ]; then
+ echo "Could not compile checker" >&2
+ exit 1
+ fi
+ echo "Compiled checker"
+else
+ echo "No checker found at $checker_src"
+fi
+
+# Graders path
+graders_dir="$current_dir/graders"
+if [ ! -e "$graders_dir" ]; then
+ echo "Grader folder was not found" >&2
+ exit 1
+fi
+
+# Find and compile manager if it exists
+manager_src="$graders_dir/manager.cpp"
+if [ -e "$manager_src" ]; then
+ echo "Compiling manager"
+ manager_exe="$graders_dir/manager"
+ g++ -x c++ -std=gnu++17 -O2 -o "$manager_exe" "$manager_src"
+ chmod +x "$manager_exe"
+ if [ $? -ne 0 ]; then
+ echo "Could not compile manager" >&2
+ exit 1
+ fi
+ manager_files+=("manager")
+fi
+
+# Process other graders
+graders_list=($(ls "$graders_dir" | grep -v 'manager.cpp'))
+for grader_name in "${graders_list[@]}"; do
+ manager_files+=("$grader_name")
+done
+
+# Extract problem name and compile necessary files
+problem_name='?'
+for file in "${manager_files[@]}"; do
+ if [[ "$file" == *.h && "$file" != "testlib.h" ]]; then
+ problem_name="${file%.h}"
+ echo "Problem name: $problem_name"
+ break
+ fi
+done
+
+files_to_compile=("graders/$problem_name.cpp")
+[ -e graders/grader.cpp ] && files_to_compile+=("graders/grader.cpp")
+[ -e graders/stub.cpp ] && files_to_compile+=("graders/stub.cpp")
+
+g++ -DEVAL -std=gnu++17 -O2 -pipe -s -o graders/"$problem_name" "${files_to_compile[@]}"
+if [ $? -ne 0 ]; then
+ echo "Failed to compile $problem_name" >&2
+ exit 1
+fi
+chmod +x graders/"$problem_name"
+echo "Compiled $problem_name from ${files_to_compile[@]} successfully"
+
+echo "Manager files: ${manager_files[@]}"
+"""
+
+ async def _get_run_script(self):
+ """Get the run script content."""
+ return """#!/usr/bin/env bash
+# disable stack limit so you don't get RE with recursion
+ulimit -s unlimited
+# some problems have 10MB+ input/output files in their test cases and you might get RE. uncomment if needed
+# ulimit -f 2097152
+
+# Check if grader_config.json exists
+if [ ! -f "graders/grader_config.json" ]; then
+ echo "Error: graders/grader_config.json not found" >&2
+ echo "Current directory contents:" >&2
+ find . -type f -o -type d | sed -e 's/[^-][^\/]*\// |/g' -e 's/|\([^ ]\)/|-\1/' >&2
+ exit 1
+fi
+
+# Read task type, code, and time limit from grader_config.json using grep and sed
+TASK_TYPE=$(grep -o '"task_type":[^,}]*' graders/grader_config.json | sed 's/"task_type":\\s*"\\([^"]*\\)"/\\1/')
+TASK_NAME=$(grep -o '"code":[^,}]*' graders/grader_config.json | sed 's/"code":\\s*"\\([^"]*\\)"/\\1/')
+TIME_LIMIT=$(grep -o '"time_limit":[^,}]*' graders/grader_config.json | sed 's/"time_limit":\\s*\\([^,}]*\\)/\\1/')
+MEMORY_LIMIT=$(grep -o '"memory_limit":[^,}]*' graders/grader_config.json | sed 's/"memory_limit":\\s*\\([^,}]*\\)/\\1/')
+TASK_EXECUTABLE="graders/$TASK_NAME"
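+
+# For reference, the grader_config.json written by the execution client looks roughly like
+#   {"task_type": "Batch", "code": "<problem_id>", "time_limit": 2.0, "memory_limit": 2147483648}
+# with extra task_type_parameters_Communication_* keys for interactive (Communication) tasks.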
+
+# Set memory limit in KB (convert from bytes)
+MEMORY_LIMIT_KB=0
+if [ -n "$MEMORY_LIMIT" ]; then
+ MEMORY_LIMIT_KB=$(($MEMORY_LIMIT / 1024))
+ # Set the memory limit for the entire script and all child processes
+ ulimit -v $MEMORY_LIMIT_KB
+fi
+
+# "Securely" handle the correct output file
+CORRECT_OUTPUT=""
+if [ -f "correct_output.txt" ]; then
+ # Read the content and immediately remove the file
+ CORRECT_OUTPUT=$(cat correct_output.txt)
+ rm -f correct_output.txt
+fi
+
+# Create a temporary file for solution output
+SOLUTION_OUTPUT=$(mktemp)
+
+# Global variables for process tracking
+declare -a ALL_PIDS
+declare -a FIFO_DIRS
+
+# Define cleanup function - simplified assuming timeout exists
+function cleanup {
+ # Kill all tracked processes silently
+ exec 2>/dev/null
+ for pid in "${ALL_PIDS[@]:-}"; do
+ kill -9 "$pid" 2>/dev/null || true
+ done
+
+ # Clean up FIFO directories
+ for dir in "${FIFO_DIRS[@]:-}"; do
+ [ -d "$dir" ] && rm -rf "$dir"
+ done
+
+ # Clean up temporary files
+ rm -f "$SOLUTION_OUTPUT" || true
+ exec 2>&2
+}
+
+# Set up signal handling
+trap cleanup EXIT INT TERM
+
+# Function to handle exit codes consistently across task types
+function handle_exit_code {
+ local exit_code=$1
+
+ # Check for known timeout exit codes:
+ # - 124: standard timeout exit code
+ # - 137: SIGKILL (128+9), used for hard timeouts
+ # - 143: SIGTERM (128+15), can also be used for timeouts
+ if [ $exit_code -eq 124 ] || [ $exit_code -eq 137 ] || [ $exit_code -eq 143 ]; then
+ echo "0"
+ echo "Time limit exceeded (${TIME_LIMIT}s)" >&2
+ return 124
+ # All other non-zero exit codes should be treated as runtime errors
+ elif [ $exit_code -ne 0 ]; then
+ echo "0"
+ echo "Runtime error with exit code $exit_code" >&2
+ return $exit_code
+ fi
+
+ # Success case - return 0
+ return 0
+}
+
+# Function to run a command with timeout (simplified assuming timeout exists)
+function run_with_timeout {
+ local soft_limit=$1; shift
+ local command_to_run="$@"
+
+ timeout --preserve-status "$soft_limit" "$@"
+ return $?
+}
+
+case "$TASK_TYPE" in
+ "Batch")
+ # Simple batch execution with timeout
+ run_with_timeout "$TIME_LIMIT" ./$TASK_EXECUTABLE < input.txt > "$SOLUTION_OUTPUT"
+ exit_code=$?
+
+ # Handle non-zero exit codes
+        handle_exit_code $exit_code
+        handler_status=$?
+        if [ $handler_status -ne 0 ]; then
+            exit $handler_status
+        fi
+
+ # Check the output if we have a correct output
+ if [ -n "$CORRECT_OUTPUT" ]; then
+ # Restore the correct output file
+ echo "$CORRECT_OUTPUT" > correct_output.txt
+
+ # Check if there's a custom checker
+ if [ -f "checker/checker" ]; then
+ # Let the checker handle everything
+ ./checker/checker input.txt correct_output.txt "$SOLUTION_OUTPUT"
+ exit $?
+ else
+ # Simple diff-based checking
+ if diff -bq <(echo "$CORRECT_OUTPUT") "$SOLUTION_OUTPUT" >/dev/null; then
+ echo "1"
+ echo "Output is correct (diff)" >&2
+ else
+ echo "0"
+ echo "Output isn't correct (diff)" >&2
+ exit 0
+ fi
+ fi
+ else
+ # If no correct output was provided, just output the solution's output
+ cat "$SOLUTION_OUTPUT"
+ fi
+ ;;
+
+ "Communication")
+ # Read Communication-specific parameters
+ NUM_PROCESSES=$(grep -o '"task_type_parameters_Communication_num_processes":[^,}]*' graders/grader_config.json | sed 's/.*:\\s*\\([0-9]*\\)/\\1/' || true)
+ if [ -z "$NUM_PROCESSES" ]; then
+ NUM_PROCESSES=1
+ fi
+ USER_IO=$(grep -o '"task_type_parameters_Communication_user_io":[^,}]*' graders/grader_config.json | sed 's/.*:\\s*"\\([^"]*\\)"/\\1/' || echo "std_io")
+
+ # Read custom manager arguments if they exist
+ MANAGER_CUSTOM_ARGS=""
+ if grep -q '"task_type_parameters_Communication_manager_args"' graders/grader_config.json; then
+ MANAGER_CUSTOM_ARGS=$(grep -o '"task_type_parameters_Communication_manager_args":[^,}]*' graders/grader_config.json | sed 's/.*:\\s*"\\([^"]*\\)"/\\1/')
+ fi
+
+ # Create temporary directories for FIFOs
+ for i in $(seq 0 $((NUM_PROCESSES-1))); do
+ FIFO_DIRS[$i]=$(mktemp -d)
+
+ # Create FIFOs for this process
+ mkfifo "${FIFO_DIRS[$i]}/u${i}_to_m"
+ mkfifo "${FIFO_DIRS[$i]}/m_to_u${i}"
+ chmod 755 "${FIFO_DIRS[$i]}"
+ chmod 666 "${FIFO_DIRS[$i]}/u${i}_to_m" "${FIFO_DIRS[$i]}/m_to_u${i}"
+ done
+
+ # Prepare manager arguments
+ MANAGER_ARGS=""
+ for i in $(seq 0 $((NUM_PROCESSES-1))); do
+ MANAGER_ARGS="$MANAGER_ARGS ${FIFO_DIRS[$i]}/u${i}_to_m ${FIFO_DIRS[$i]}/m_to_u${i}"
+ done
+
+ # Add custom manager arguments if specified
+ if [ -n "$MANAGER_CUSTOM_ARGS" ]; then
+ MANAGER_ARGS="$MANAGER_ARGS $MANAGER_CUSTOM_ARGS"
+ fi
+
+ # Start all user processes first
+ for i in $(seq 0 $((NUM_PROCESSES-1))); do
+ if [ "$USER_IO" = "fifo_io" ]; then
+ # Pass FIFOs as arguments
+ ARGS="${FIFO_DIRS[$i]}/m_to_u${i} ${FIFO_DIRS[$i]}/u${i}_to_m"
+ if [ "$NUM_PROCESSES" -ne 1 ]; then
+ ARGS="$ARGS $i"
+ fi
+ ./$TASK_EXECUTABLE $ARGS &
+ ALL_PIDS+=($!)
+ else
+ # Use stdin/stdout redirection
+ if [ "$NUM_PROCESSES" -ne 1 ]; then
+ ./$TASK_EXECUTABLE "$i" < "${FIFO_DIRS[$i]}/m_to_u${i}" > "${FIFO_DIRS[$i]}/u${i}_to_m" 2>/dev/null &
+ ALL_PIDS+=($!)
+ else
+ ./$TASK_EXECUTABLE < "${FIFO_DIRS[$i]}/m_to_u${i}" > "${FIFO_DIRS[$i]}/u${i}_to_m" 2>/dev/null &
+ ALL_PIDS+=($!)
+ fi
+ fi
+ done
+
+ # Run the manager with timeout using direct pipe from input.txt
+ run_with_timeout "$TIME_LIMIT" ./graders/manager $MANAGER_ARGS < input.txt > "$SOLUTION_OUTPUT"
+
+ exit_code=$?
+
+ # Handle non-zero exit codes
+        handle_exit_code $exit_code
+        handler_status=$?
+        if [ $handler_status -ne 0 ]; then
+            exit $handler_status
+        fi
+
+ # Check the output if we have a correct output AND there's a checker (otherwise we assume the manager handles everything)
+ if [ -n "$CORRECT_OUTPUT" ] && [ -f "checker/checker" ]; then
+ # Restore the correct output file
+ echo "$CORRECT_OUTPUT" > correct_output.txt
+
+ # Let the checker handle it
+ ./checker/checker input.txt correct_output.txt "$SOLUTION_OUTPUT"
+ exit $?
+ else
+ # we assume the manager handles it
+ cat "$SOLUTION_OUTPUT"
+ fi
+ ;;
+
+ *)
+ echo "0"
+ echo "Unsupported task type \"$TASK_TYPE\"" >&2
+ exit 1
+ ;;
+esac
+"""
+
+
+def get_morph_client_from_env(session=None) -> MorphCloudExecutionClient:
+ """
+ Creates a MorphCloudExecutionClient instance using environment variables.
+
+ Environment variables:
+ MORPH_API_KEY: API key for MorphCloud
+
+ Args:
+ session: Optional aiohttp.ClientSession to use for HTTP requests
+
+ Returns:
+ MorphCloudExecutionClient: A configured MorphCloud execution client
+ """
+ if not is_morph_available():
+ raise ImportError(
+ "MorphCloud is not available and required for this function. Please install MorphCloud with "
+ "`pip install morphcloud` and add an API key to a `.env` file."
+ )
+
+ load_dotenv()
+ api_key = os.environ.get("MORPH_API_KEY")
+ if not api_key:
+ raise ValueError("MORPH_API_KEY environment variable is required")
+
+ return MorphCloudExecutionClient(api_key=api_key)
+
+
+# noqa: W293
diff --git a/src/open_r1/utils/competitive_programming/piston_client.py b/src/open_r1/utils/competitive_programming/piston_client.py
new file mode 100644
index 000000000..7dfc9a5ec
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/piston_client.py
@@ -0,0 +1,224 @@
+import asyncio
+import os
+import random
+import re
+import subprocess
+from collections import Counter
+from functools import lru_cache
+
+import aiohttp
+
+
+class PistonError(Exception):
+ pass
+
+
+@lru_cache(maxsize=1)
+def get_piston_client_from_env(session=None):
+ piston_endpoints = os.getenv("PISTON_ENDPOINTS")
+ if piston_endpoints is None:
+ raise ValueError(
+ "For IOI/CF problems Piston endpoints running our IOI package are required. Please add a list of valid Piston endpoints to a PISTON_ENDPOINTS variable in a `.env` file."
+ )
+ piston_endpoints = sorted(
+ piston_endpoints.split(",") if piston_endpoints != "slurm" else get_slurm_piston_endpoints()
+ )
+    gpu_nb = int(os.getenv("LOCAL_RANK", 0))  # per-GPU index
+ world = int(os.getenv("WORLD_SIZE", 1)) # total GPUs
+ if world > 1:
+ print(f"Using a subset of piston endpoints for GPU#{gpu_nb}")
+ piston_endpoints = piston_endpoints[gpu_nb::world]
+ random.shuffle(piston_endpoints)
+ max_requests_per_endpoint = os.getenv("PISTON_MAX_REQUESTS_PER_ENDPOINT", "1")
+ return PistonClient(piston_endpoints, session, max_requests_per_endpoint=int(max_requests_per_endpoint))
+
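+# Example environment configuration (host names and values below are illustrative):
+#   PISTON_ENDPOINTS=http://host-a:2000/api/v2,http://host-b:2000/api/v2
+#   PISTON_MAX_REQUESTS_PER_ENDPOINT=2
+# PISTON_ENDPOINTS=slurm instead discovers endpoints from running `piston-worker-<port>` Slurm jobs.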
+
+class PistonClient:
+ """
+ A client that will automatically load balance across multiple Piston (https://github.com/engineer-man/piston) workers.
+ This assumes piston is running our custom cms_ioi package: https://github.com/guipenedo/piston/releases/
+ We recommend starting the instances with the following script as otherwise some IOI problems will hit default limits:
+ ```
+ export PISTON_COMPILE_TIMEOUT=60000
+ export PISTON_RUN_TIMEOUT=60000
+ export PISTON_OUTPUT_MAX_SIZE=1000000000
+ export PISTON_MAX_FILE_SIZE=1000000000
+ export PISTON_DISABLE_NETWORKING=true
+ export PISTON_REPO_URL=https://github.com/guipenedo/piston/releases/download/pkgs/index
+ mkdir /piston
+
+ sed -i '/app.use(body_parser.urlencoded/c\ app.use(body_parser.urlencoded({ extended: true, limit: \"512mb\" }));' src/index.js
+ sed -i '/app.use(body_parser.json/c\ app.use(body_parser.json({ limit: \"512mb\" }));' src/index.js
+
+ # Start server in background
+    node src
+    ```
+
+ Piston docs for API usage: https://piston.readthedocs.io/en/latest/api-v2/
+ """
+
+ def __init__(
+ self,
+ base_endpoint: str | list[str] = "http://ip-10-53-80-65:3223/api/v2",
+ session=None,
+ max_requests_per_endpoint=1,
+ ):
+ self.max_requests_per_endpoint = max_requests_per_endpoint
+ self.base_endpoints = [base_endpoint] if isinstance(base_endpoint, str) else base_endpoint
+ if len(self.base_endpoints) == 0:
+ raise ValueError("No Piston endpoints provided. Please check your PISTON_ENDPOINTS environment variable.")
+ self.endpoint_ids = {endpoint: i for i, endpoint in enumerate(self.base_endpoints)}
+
+ self._session = session
+ self.endpoint_tokens = asyncio.Queue(maxsize=max_requests_per_endpoint * len(self.base_endpoints))
+
+ for _ in range(max_requests_per_endpoint):
+ for base_endpoint in self.base_endpoints:
+ self.endpoint_tokens.put_nowait(base_endpoint)
+ self._endpoint_failures = Counter()
+ self._unhealthy_endpoints = set()
+ self._endpoint_failures_lock = asyncio.Lock()
+
+ @property
+ def session(self):
+ if self._session is None:
+ self._session = aiohttp.ClientSession(
+ timeout=aiohttp.ClientTimeout(sock_read=30),
+ connector=aiohttp.TCPConnector(
+ limit=self.max_requests_per_endpoint * len(self.base_endpoints),
+ ttl_dns_cache=300,
+ keepalive_timeout=5 * 60,
+ ),
+ )
+ return self._session
+
+ async def _wait_for_endpoint(self):
+ endpoint = await self.endpoint_tokens.get()
+ return endpoint
+
+ async def _release_endpoint(self, endpoint):
+ await self.endpoint_tokens.put(endpoint)
+
+ async def _send_request(self, endpoint, route, data=None, method="post"):
+ async with self.session.request(
+ method, f"{endpoint.rstrip('/')}/{route}", json=data, headers={"Content-Type": "application/json"}
+ ) as response:
+ return await response.json(content_type=None)
+
+ async def _send_to_all(self, route, data=None, method="post"):
+ return await asyncio.gather(
+ *[self._send_request(endpoint, route, data, method) for endpoint in self.base_endpoints]
+ )
+
+ async def _send_to_one(self, endpoint, route, data=None, method="post"):
+ return await self._send_request(endpoint, route, data, method)
+
+ async def install_package(self, language, version):
+ return await self._send_to_all("packages", {"language": language, "version": version}, method="post")
+
+ async def uninstall_package(self, language, version):
+ return await self._send_to_all("packages", {"language": language, "version": version}, method="delete")
+
+ async def get_supported_runtimes(self):
+ return await self._send_to_all("runtimes", method="get")
+
+ async def _check_failed_endpoint(self, endpoint):
+ async with self._endpoint_failures_lock:
+ if endpoint in self._unhealthy_endpoints:
+ return
+ try:
+ await asyncio.sleep(5)
+ await self.get_supported_runtimes()
+ except Exception as e:
+ print(f"Error checking endpoint {endpoint}, dropping it ({e})")
+ self._unhealthy_endpoints.add(endpoint)
+ if len(self._unhealthy_endpoints) >= len(self.base_endpoints):
+ raise PistonError("All endpoints are unhealthy. Please check your Piston workers.")
+
+ async def send_execute(self, data, language="cms_ioi", max_retries=5):
+ data = data | {
+ "language": language,
+ "version": "*",
+ }
+
+ base_delay = 1.0
+
+ status = None
+ endpoint = None
+
+ for attempt in range(max_retries + 1):
+ try:
+ endpoint = await self._wait_for_endpoint()
+ if attempt > 0:
+ await asyncio.sleep(1)
+ async with self.session.post(
+ f"{endpoint.rstrip('/')}/execute", json=data, headers={"Content-Type": "application/json"}
+ ) as response:
+ status = response.status
+ res_json = await response.json(content_type=None)
+
+ if status != 200:
+ raise PistonError(f"Server error. status={status}. {res_json}")
+ if res_json is None:
+ raise PistonError(f"Empty response. status={status}")
+ # piston overloaded
+ if "run" in res_json and "Resource temporarily unavailable" in res_json["run"].get("stderr", ""):
+ raise PistonError(f"Piston overloaded: {res_json['run']['stderr']}")
+ return res_json
+
+ except (PistonError, asyncio.TimeoutError, aiohttp.ClientConnectionError, RuntimeError) as e:
+ # Only retry if we haven't reached max retries yet
+ if attempt < max_retries:
+ # Calculate backoff with jitter
+ delay = min(base_delay * (2**attempt), 10) # Exponential backoff, capped at 10 seconds
+ jitter = delay * 0.2 * (2 * asyncio.get_event_loop().time() % 1 - 0.5) # Add ±10% jitter
+ retry_delay = delay + jitter
+ print(f"Retrying in {retry_delay:.2f} seconds [{self.endpoint_ids[endpoint]}] {endpoint} - {e}")
+
+ # special case: worker died
+ if isinstance(e, aiohttp.ClientConnectionError) and "Connect call failed" in str(e):
+ await self._check_failed_endpoint(endpoint)
+ else:
+ # hopefully we won't get this one again
+ await self._release_endpoint(endpoint)
+ endpoint = None
+
+ await asyncio.sleep(retry_delay)
+ else:
+ await self._check_failed_endpoint(endpoint)
+ except Exception as e:
+ print(f"Propagating exception {type(e)}: {e}")
+ raise e
+ finally:
+ # Ensure endpoint is always released, even if an exception occurs
+ if endpoint is not None:
+ try:
+ await self._release_endpoint(endpoint)
+ except Exception as e:
+ print(f"Error releasing endpoint {endpoint}: {e}")
+ endpoint = None
+
+
+def get_slurm_piston_endpoints():
+ """Get list of active piston worker endpoints from squeue output"""
+ # Run squeue command to get job name, hostname and status, filtering for RUNNING state
+ result = subprocess.run(
+ ["squeue", '--format="%j %N %T"', "--noheader", "--states=RUNNING"], capture_output=True, text=True
+ )
+
+    # Split output into lines (no header is present thanks to --noheader)
+    lines = result.stdout.strip().split("\n")
+
+    endpoints = []
+    for line in lines:
+        # Parse job name and hostname from squeue output, skipping malformed or empty lines
+        fields = line.split()
+        if len(fields) < 2:
+            continue
+        job_name = fields[0].strip('"')  # Remove quotes added by --format
+        hostname = fields[1]
+
+ # Extract port if job name matches pattern
+ match = re.match(r"piston-worker-(\d+)", job_name)
+ if match:
+ port = match.group(1)
+ endpoints.append(f"http://{hostname}:{port}/api/v2")
+
+ return endpoints
diff --git a/src/open_r1/utils/competitive_programming/utils.py b/src/open_r1/utils/competitive_programming/utils.py
new file mode 100644
index 000000000..7e1bf730f
--- /dev/null
+++ b/src/open_r1/utils/competitive_programming/utils.py
@@ -0,0 +1,11 @@
+from itertools import islice
+
+
+def batched(iterable, n):
+ "Batch data into lists of length n. The last batch may be shorter."
+ # batched('ABCDEFG', 3) --> ABC DEF G
+    if n < 1:
+        # n < 1 (e.g. -1) means "no batching": yield everything as one batch
+        yield list(iterable)
+        return
+ it = iter(iterable)
+ while batch := list(islice(it, n)):
+ yield batch
diff --git a/src/open_r1/utils/data.py b/src/open_r1/utils/data.py
new file mode 100644
index 000000000..b151a8a7f
--- /dev/null
+++ b/src/open_r1/utils/data.py
@@ -0,0 +1,65 @@
+import logging
+
+import datasets
+from datasets import DatasetDict, concatenate_datasets
+
+from ..configs import ScriptArguments
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_dataset(args: ScriptArguments) -> DatasetDict:
+ """Load a dataset or a mixture of datasets based on the configuration.
+
+ Args:
+ args (ScriptArguments): Script arguments containing dataset configuration.
+
+ Returns:
+ DatasetDict: The loaded datasets.
+ """
+ if args.dataset_name and not args.dataset_mixture:
+ logger.info(f"Loading dataset: {args.dataset_name}")
+ return datasets.load_dataset(args.dataset_name, args.dataset_config)
+ elif args.dataset_mixture:
+ logger.info(f"Creating dataset mixture with {len(args.dataset_mixture.datasets)} datasets")
+ seed = args.dataset_mixture.seed
+ datasets_list = []
+
+ for dataset_config in args.dataset_mixture.datasets:
+ logger.info(f"Loading dataset for mixture: {dataset_config.id} (config: {dataset_config.config})")
+ ds = datasets.load_dataset(
+ dataset_config.id,
+ dataset_config.config,
+ split=dataset_config.split,
+ )
+ if dataset_config.columns is not None:
+ ds = ds.select_columns(dataset_config.columns)
+ if dataset_config.weight is not None:
+ ds = ds.shuffle(seed=seed).select(range(int(len(ds) * dataset_config.weight)))
+ logger.info(
+ f"Subsampled dataset '{dataset_config.id}' (config: {dataset_config.config}) with weight={dataset_config.weight} to {len(ds)} examples"
+ )
+
+ datasets_list.append(ds)
+
+ if datasets_list:
+ combined_dataset = concatenate_datasets(datasets_list)
+ combined_dataset = combined_dataset.shuffle(seed=seed)
+ logger.info(f"Created dataset mixture with {len(combined_dataset)} examples")
+
+ if args.dataset_mixture.test_split_size is not None:
+ combined_dataset = combined_dataset.train_test_split(
+ test_size=args.dataset_mixture.test_split_size, seed=seed
+ )
+ logger.info(
+ f"Split dataset into train and test sets with test size: {args.dataset_mixture.test_split_size}"
+ )
+ return combined_dataset
+ else:
+ return DatasetDict({"train": combined_dataset})
+ else:
+ raise ValueError("No datasets were loaded from the mixture configuration")
+
+ else:
+ raise ValueError("Either `dataset_name` or `dataset_mixture` must be provided")
diff --git a/src/open_r1/utils/evaluation.py b/src/open_r1/utils/evaluation.py
index 9cbac82d4..e79cd2972 100644
--- a/src/open_r1/utils/evaluation.py
+++ b/src/open_r1/utils/evaluation.py
@@ -7,6 +7,7 @@
if TYPE_CHECKING:
from trl import GRPOConfig, SFTConfig, ModelConfig
+import base64
import os
@@ -24,7 +25,11 @@
def register_lighteval_task(
- configs: Dict[str, str], eval_suite: str, task_name: str, task_list: str, num_fewshot: int = 0
+ configs: Dict[str, str],
+ eval_suite: str,
+ task_name: str,
+ task_list: str,
+ num_fewshot: int = 0,
):
"""Registers a LightEval task configuration.
@@ -46,8 +51,12 @@ def register_lighteval_task(
LIGHTEVAL_TASKS = {}
-register_lighteval_task(LIGHTEVAL_TASKS, "custom", "math_500", "math_500", 0)
-register_lighteval_task(LIGHTEVAL_TASKS, "custom", "aime24", "aime24", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "math_500", "math_500", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "aime24", "aime24", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "aime25", "aime25", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "lighteval", "gpqa", "gpqa:diamond", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "extended", "lcb", "lcb:codegeneration", 0)
+register_lighteval_task(LIGHTEVAL_TASKS, "extended", "lcb_v4", "lcb:codegeneration_v4", 0)
def get_lighteval_tasks():
@@ -58,7 +67,9 @@ def get_lighteval_tasks():
def run_lighteval_job(
- benchmark: str, training_args: Union["SFTConfig", "GRPOConfig"], model_args: "ModelConfig"
+ benchmark: str,
+ training_args: Union["SFTConfig", "GRPOConfig"],
+ model_args: "ModelConfig",
) -> None:
task_list = LIGHTEVAL_TASKS[benchmark]
model_name = training_args.hub_model_id
@@ -68,13 +79,14 @@ def run_lighteval_job(
if get_param_count_from_repo_id(model_name) >= 30_000_000_000:
tensor_parallel = True
else:
+ num_gpus = 2 # Hack while cluster is full
tensor_parallel = False
cmd = VLLM_SLURM_PREFIX.copy()
cmd_args = [
f"--gres=gpu:{num_gpus}",
f"--job-name=or1_{benchmark}_{model_name.split('/')[-1]}_{model_revision}",
- "slurm/eval_callback.slurm",
+ "slurm/evaluate.slurm",
benchmark,
f'"{task_list}"',
model_name,
@@ -83,7 +95,10 @@ def run_lighteval_job(
f"{model_args.trust_remote_code}",
]
if training_args.system_prompt is not None:
- cmd_args.append(f"--system_prompt={training_args.system_prompt}")
+ # encode to base64 to avoid issues with special characters
+ # we decode in the sbatch script
+ prompt_encoded = base64.b64encode(training_args.system_prompt.encode()).decode()
+ cmd_args.append(prompt_encoded)
cmd[-1] += " " + " ".join(cmd_args)
subprocess.run(cmd, check=True)
diff --git a/src/open_r1/utils/hub.py b/src/open_r1/utils/hub.py
index 8ac53a384..25c4311c7 100644
--- a/src/open_r1/utils/hub.py
+++ b/src/open_r1/utils/hub.py
@@ -76,7 +76,8 @@ def check_hub_revision_exists(training_args: SFTConfig | GRPOConfig):
# If the revision exists, we next check it has a README file
if training_args.hub_model_revision in revisions:
repo_files = list_repo_files(
- repo_id=training_args.hub_model_id, revision=training_args.hub_model_revision
+ repo_id=training_args.hub_model_id,
+ revision=training_args.hub_model_revision,
)
if "README.md" in repo_files and training_args.overwrite_hub_revision is False:
raise ValueError(
diff --git a/src/open_r1/utils/import_utils.py b/src/open_r1/utils/import_utils.py
new file mode 100644
index 000000000..5d6624302
--- /dev/null
+++ b/src/open_r1/utils/import_utils.py
@@ -0,0 +1,30 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from transformers.utils.import_utils import _is_package_available
+
+
+# Use same as transformers.utils.import_utils
+_e2b_available = _is_package_available("e2b")
+
+
+def is_e2b_available() -> bool:
+ return _e2b_available
+
+
+_morph_available = _is_package_available("morphcloud")
+
+
+def is_morph_available() -> bool:
+ return _morph_available
diff --git a/src/open_r1/utils/model_utils.py b/src/open_r1/utils/model_utils.py
new file mode 100644
index 000000000..8191c17ea
--- /dev/null
+++ b/src/open_r1/utils/model_utils.py
@@ -0,0 +1,42 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizer
+
+from trl import ModelConfig, get_kbit_device_map, get_quantization_config
+
+from ..configs import GRPOConfig, SFTConfig
+
+
+def get_tokenizer(model_args: ModelConfig, training_args: SFTConfig | GRPOConfig) -> PreTrainedTokenizer:
+ """Get the tokenizer for the model."""
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_args.model_name_or_path,
+ revision=model_args.model_revision,
+ trust_remote_code=model_args.trust_remote_code,
+ )
+
+ if training_args.chat_template is not None:
+ tokenizer.chat_template = training_args.chat_template
+
+ return tokenizer
+
+
+def get_model(model_args: ModelConfig, training_args: SFTConfig | GRPOConfig) -> AutoModelForCausalLM:
+ """Get the model"""
+ torch_dtype = (
+ model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
+ )
+ quantization_config = get_quantization_config(model_args)
+ model_kwargs = dict(
+ revision=model_args.model_revision,
+ trust_remote_code=model_args.trust_remote_code,
+ attn_implementation=model_args.attn_implementation,
+ torch_dtype=torch_dtype,
+ use_cache=False if training_args.gradient_checkpointing else True,
+ device_map=get_kbit_device_map() if quantization_config is not None else None,
+ quantization_config=quantization_config,
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ **model_kwargs,
+ )
+ return model
diff --git a/src/open_r1/utils/routed_morph.py b/src/open_r1/utils/routed_morph.py
new file mode 100644
index 000000000..835c784af
--- /dev/null
+++ b/src/open_r1/utils/routed_morph.py
@@ -0,0 +1,120 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional
+
+import requests
+
+
+class RoutedMorphSandbox:
+ """
+ Client for the MorphCloud router service that mimics the API of MorphCloud's Sandbox.
+
+ This class provides a simple interface to execute code via a central MorphCloud router,
+ which manages sandbox creation and cleanup. It allows batch processing of multiple scripts
+ in a single request for improved efficiency.
+
+ Attributes:
+ router_url (str): The URL of the MorphCloud router service.
+ timeout (int): Execution timeout in seconds.
+ request_timeout (int): HTTP request timeout in seconds.
+ """
+
+ def __init__(self, router_url: str, timeout: int = 300, request_timeout: int = 60):
+ """
+ Initialize the routed MorphCloud sandbox client.
+
+ Args:
+ router_url: The URL of the MorphCloud router, including host and port.
+ timeout: Default execution timeout in seconds.
+ request_timeout: Default HTTP request timeout in seconds.
+ """
+ self.router_url = router_url
+ self.timeout = timeout
+ self.request_timeout = request_timeout
+
+ def run_code(
+ self,
+ scripts: List[str],
+ languages: Optional[List[str]] = None,
+ timeout: Optional[int] = None,
+ request_timeout: Optional[int] = None,
+ ) -> List:
+ """
+ Execute multiple scripts using MorphCloud via the router.
+
+ Args:
+ scripts: List of code scripts to execute.
+ languages: List of programming languages for each script. If None, defaults to Python for all scripts.
+ timeout: Execution timeout in seconds. If None, uses the instance timeout.
+ request_timeout: HTTP request timeout in seconds. If None, uses the instance request_timeout.
+
+ Returns:
+ List of execution results with text and exception_str properties.
+ """
+
+ actual_timeout = timeout if timeout is not None else self.timeout
+ actual_request_timeout = request_timeout if request_timeout is not None else self.request_timeout
+
+ # Default to Python for all scripts if languages is not provided
+ if languages is None:
+ languages = ["python"] * len(scripts)
+
+ payload = {
+ "scripts": scripts,
+ "languages": languages,
+ "timeout": actual_timeout,
+ "request_timeout": actual_request_timeout,
+ }
+
+ try:
+ endpoint = f"http://{self.router_url}/execute_batch"
+ response = requests.post(endpoint, json=payload, timeout=actual_request_timeout)
+
+ if response.status_code != 200:
+ error = f"Request to MorphCloud router failed with status code: {response.status_code}"
+ print(error)
+
+ results = []
+ for _ in scripts:
+ results.append(type("obj", (object,), {"text": None, "exception_str": error}))
+ return results
+
+ response_data = response.json()
+ results = []
+
+ for item in response_data:
+ # Log the response data to see what we're getting
+ # print(f"RoutedMorphSandbox: Got response item: {item}")
+ result = type(
+ "obj",
+ (object,),
+ {
+ "text": item.get("text"),
+ "exception_str": item.get("exception_str"),
+ },
+ )
+ results.append(result)
+
+ return results
+
+ except Exception as e:
+ error = f"Error communicating with MorphCloud router: {str(e)}"
+ print(error)
+
+ results = []
+ for _ in scripts:
+ results.append(type("obj", (object,), {"text": None, "exception_str": error}))
+ return results
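+
+
+# Minimal usage sketch (assumes a MorphCloud router is already running and reachable at this address):
+#   sbx = RoutedMorphSandbox(router_url="0.0.0.0:8001")
+#   results = sbx.run_code(["print('hello world')"])
+#   print(results[0].text, results[0].exception_str)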
diff --git a/src/open_r1/utils/routed_sandbox.py b/src/open_r1/utils/routed_sandbox.py
new file mode 100644
index 000000000..97bb65cf4
--- /dev/null
+++ b/src/open_r1/utils/routed_sandbox.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional
+
+import requests
+from e2b_code_interpreter.models import Execution, ExecutionError, Result
+
+
+class RoutedSandbox:
+ """
+ A sandbox environment that routes code execution requests to the E2B Router.
+ This class is designed for batched execution of scripts, primarily for Python code.
+ It mimics the usage of 'Sandbox' from 'e2b_code_interpreter', but adds support for batch processing.
+
+ Attributes:
+ router_url (str): The URL of the E2B Router to which code execution requests are sent.
+ """
+
+ def __init__(self, router_url: str):
+ """
+ Initializes the RoutedSandbox with the specified router URL.
+
+ Args:
+ router_url (str): The URL of the E2B Router.
+ """
+ self.router_url = router_url
+
+ def run_code(
+ self,
+ scripts: list[str],
+ languages: Optional[List[str]] = None,
+ timeout: Optional[int] = None,
+ request_timeout: Optional[int] = None,
+ ) -> list[Execution]:
+ """
+ Executes a batch of scripts in the sandbox environment.
+
+ Args:
+ scripts (list[str]): A list of code scripts to execute.
+ languages (list[str], optional): List of programming languages for each script. If None, defaults to Python for all scripts.
+ timeout (Optional[int], optional): The maximum execution time for each script in seconds. Defaults to 300 seconds.
+ request_timeout (Optional[int], optional): The timeout for the HTTP request in seconds. Defaults to 30 seconds.
+
+ Returns:
+ list[Execution]: A list of Execution objects containing the results, logs, and errors (if any) for each script.
+ """
+ # Set default values for timeouts if not provided
+ if timeout is None:
+ timeout = 300 # Default to 5 minutes
+ if request_timeout is None:
+ request_timeout = 30 # Default to 30 seconds
+
+ # Default to Python for all scripts if languages is not provided
+ if languages is None:
+ languages = ["python"] * len(scripts)
+
+ # Prepare the payload for the HTTP POST request
+ payload = {
+ "scripts": scripts,
+ "languages": languages,
+ "timeout": timeout,
+ "request_timeout": request_timeout,
+ }
+
+ # Send the request to the E2B Router
+ response = requests.post(f"http://{self.router_url}/execute_batch", json=payload)
+ if not response.ok:
+ print(f"Request failed with status code: {response.status_code}")
+
+ # Parse the response and construct Execution objects
+ results = response.json()
+ output = []
+ for result in results:
+ if result["execution"] is None:
+ # If execution is None, create an empty Execution object
+ # This can happen when a script times out or fails to execute
+ execution = Execution()
+ else:
+ execution = Execution(
+ results=[Result(**r) for r in result["execution"]["results"]],
+ logs=result["execution"]["logs"],
+ error=(ExecutionError(**result["execution"]["error"]) if result["execution"]["error"] else None),
+ execution_count=result["execution"]["execution_count"],
+ )
+ output.append(execution)
+
+ return output
+
+
+if __name__ == "__main__":
+    # For local testing, launch an E2B router first with: python scripts/e2b_router.py
+    sbx = RoutedSandbox(router_url="0.0.0.0:8000")
+    codes = ["print('hello world')", "print('hello world)"]  # the second script contains a deliberate syntax error
+ executions = sbx.run_code(codes) # Execute Python inside the sandbox
+
+ print(executions)
diff --git a/src/open_r1/utils/wandb_logging.py b/src/open_r1/utils/wandb_logging.py
new file mode 100644
index 000000000..e52f911c8
--- /dev/null
+++ b/src/open_r1/utils/wandb_logging.py
@@ -0,0 +1,13 @@
+import os
+
+
+def init_wandb_training(training_args):
+ """
+    Helper function for setting up Weights & Biases logging: sets the WANDB_ENTITY, WANDB_PROJECT
+    and WANDB_RUN_GROUP environment variables from the training arguments when they are provided.
+ """
+ if training_args.wandb_entity is not None:
+ os.environ["WANDB_ENTITY"] = training_args.wandb_entity
+ if training_args.wandb_project is not None:
+ os.environ["WANDB_PROJECT"] = training_args.wandb_project
+ if training_args.wandb_run_group is not None:
+ os.environ["WANDB_RUN_GROUP"] = training_args.wandb_run_group
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/slow/test_code_reward.py b/tests/slow/test_code_reward.py
new file mode 100644
index 000000000..8718eb35a
--- /dev/null
+++ b/tests/slow/test_code_reward.py
@@ -0,0 +1,219 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+
+from datasets import load_dataset
+
+from e2b_code_interpreter.models import Execution, ExecutionError
+from open_r1.rewards import code_reward, ioi_code_reward
+from open_r1.utils.routed_morph import RoutedMorphSandbox
+from open_r1.utils.routed_sandbox import RoutedSandbox
+
+
+class TestCodeRewards(unittest.TestCase):
+ def test_python_code_reward(self):
+ # requires E2B, see the README.md file
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+ NUM_SAMPLES = 20
+ samples = code_dataset["train"].select(range(NUM_SAMPLES))
+ test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples]
+ reward_kwargs = {"verification_info": [sample["verification_info"] for sample in samples]}
+ rewards = code_reward(test_completions, **reward_kwargs)
+ print(rewards)
+ assert rewards == [1.0] * NUM_SAMPLES
+
+ def test_e2b_router(self):
+ # run router locally: python scripts/e2b_router.py
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+ NUM_SAMPLES = 128
+ samples = code_dataset["train"].select(range(NUM_SAMPLES))
+ test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples]
+ reward_kwargs = {"verification_info": [sample["verification_info"] for sample in samples]}
+ rewards = code_reward(test_completions, e2b_router_url="0.0.0.0:8000", **reward_kwargs)
+ print(rewards)
+ assert rewards == [1.0] * NUM_SAMPLES
+
+ def test_e2b_router_parallel(self):
+ # run router locally: python scripts/e2b_router.py
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+
+ BATCH_SIZE = 32
+ NUM_SAMPLES = 256
+
+ def batch_code_reward(examples):
+ test_completions = [[{"content": solution}] for solution in examples["gold_standard_solution"]]
+ reward_kwargs = {
+ "verification_info": [verification_info for verification_info in examples["verification_info"]]
+ }
+ rewards = code_reward(test_completions, e2b_router_url="0.0.0.0:8000", **reward_kwargs)
+ assert rewards == [1.0] * BATCH_SIZE
+ return examples
+
+ code_dataset = code_dataset["train"].select(range(NUM_SAMPLES))
+ code_dataset = code_dataset.map(
+ batch_code_reward,
+ batched=True,
+ batch_size=BATCH_SIZE,
+ num_proc=4,
+ load_from_cache_file=False,
+ )
+
+ def test_ioi_code_reward(self):
+        # This slow test requires spinning up a large number of Piston workers (~64 were used when testing it);
+        # see the docs in slurm/piston/README.md
+ code_dataset = load_dataset("open-r1/ioi-reward-test-dataset")
+ NUM_SAMPLES = 16
+ samples = code_dataset["train"].select(range(NUM_SAMPLES))
+ test_completions = [[{"content": f"```cpp\n{sample['sample_solution']}```"}] for sample in samples]
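+        # Forward every dataset column except the prompt/completion to the reward function as kwargs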
+ keys = [key for key in samples[0] if key not in ["prompt", "completion"]]
+ reward_kwargs = {key: [example[key] for example in samples] for key in keys}
+ rewards = ioi_code_reward(test_completions, **reward_kwargs)
+ print(rewards)
+ assert rewards == [1.0] * NUM_SAMPLES
+
+ def test_e2b_router_run_code_success(self):
+ # run router locally: python scripts/e2b_router.py
+ routed_sandbox = RoutedSandbox(router_url="localhost:8000")
+ scripts = [
+ "print('hello from integration test')",
+ "result = 2 + 2\nprint(result)",
+ ]
+
+ results = routed_sandbox.run_code(scripts)
+
+ assert len(results) == 2
+
+ for result in results:
+ assert isinstance(result, Execution)
+            # assert result.exit_code == 0  # Execution object has no attribute 'exit_code'
+ assert result.error is None
+ assert "hello" in result.logs["stdout"][0] or "4" in result.logs["stdout"][0]
+
+ def test_e2b_router_run_code_with_error(self):
+ # run router locally: python scripts/e2b_router.py
+
+ routed_sandbox = RoutedSandbox(router_url="localhost:8000")
+ scripts = ["print('this is fine')", "print('unterminated string"]
+
+ results = routed_sandbox.run_code(scripts)
+
+ assert len(results) == 2
+
+ # First one should be okay
+ # assert results[0].exit_code == 0 # Execution object has no attribute 'exit_code'
+ assert results[0].error is None
+ assert "this is fine" in results[0].logs["stdout"][0]
+
+ # Second one should have a syntax error
+
+ # assert results[1].exit_code != 0 # Execution object has no attribute 'exit_code'
+ assert results[1].error is not None
+ assert isinstance(results[1].error, ExecutionError)
+ assert "SyntaxError" in results[1].error.name
+
+ def test_python_code_reward_morph(self):
+ # requires MorphCloud, see the README.md file
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+ NUM_SAMPLES = 20
+ samples = code_dataset["train"].select(range(NUM_SAMPLES))
+ test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples]
+ reward_kwargs = {
+ "verification_info": [sample["verification_info"] for sample in samples],
+ "provider_type": "morph",
+ }
+ rewards = code_reward(test_completions, **reward_kwargs)
+ print(rewards)
+ assert rewards == [1.0] * NUM_SAMPLES
+
+ def test_morph_router(self):
+ # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+ NUM_SAMPLES = 32
+ samples = code_dataset["train"].select(range(NUM_SAMPLES))
+ test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples]
+ reward_kwargs = {
+ "verification_info": [sample["verification_info"] for sample in samples],
+ "provider_type": "morph",
+ "morph_router_url": "0.0.0.0:8001",
+ }
+ rewards = code_reward(test_completions, **reward_kwargs)
+ print(rewards)
+ assert rewards == [1.0] * NUM_SAMPLES
+
+ def test_morph_router_parallel(self):
+ # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
+ code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled")
+
+ BATCH_SIZE = 32
+ NUM_SAMPLES = 256
+
+ def batch_code_reward(examples):
+ test_completions = [[{"content": solution}] for solution in examples["gold_standard_solution"]]
+ reward_kwargs = {
+ "verification_info": [verification_info for verification_info in examples["verification_info"]],
+ "provider_type": "morph",
+ "morph_router_url": "0.0.0.0:8001",
+ }
+ rewards = code_reward(test_completions, **reward_kwargs)
+ assert rewards == [1.0] * BATCH_SIZE
+ return examples
+
+ code_dataset = code_dataset["train"].select(range(NUM_SAMPLES))
+ code_dataset = code_dataset.map(
+ batch_code_reward,
+ batched=True,
+ batch_size=BATCH_SIZE,
+ num_proc=4,
+ load_from_cache_file=False,
+ )
+
+ def test_morph_router_run_code_success(self):
+ # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
+
+ routed_sandbox = RoutedMorphSandbox(router_url="localhost:8001")
+ scripts = [
+ "print('hello from morph integration test')",
+ "result = 2 + 2\nprint(result)",
+ ]
+
+ results = routed_sandbox.run_code(scripts)
+
+ assert len(results) == 2
+
+ for result in results:
+ assert result.exception_str is None
+ assert "hello" in result.text or "4" in result.text
+
+ def test_morph_router_run_code_with_error(self):
+ # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
+
+ routed_sandbox = RoutedMorphSandbox(router_url="localhost:8001")
+ scripts = ["print('this is fine with morph')", "print('unterminated string"]
+
+ results = routed_sandbox.run_code(scripts)
+
+ assert len(results) == 2
+
+ # First one should be okay
+ assert results[0].exception_str is None
+ assert "this is fine with morph" in results[0].text
+
+ # Second one should have a syntax error
+ assert "SyntaxError" in results[1].text
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/test_rewards.py b/tests/test_rewards.py
new file mode 100644
index 000000000..03ac517c9
--- /dev/null
+++ b/tests/test_rewards.py
@@ -0,0 +1,568 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+
+from dotenv import load_dotenv
+from open_r1.configs import GRPOScriptArguments
+from open_r1.rewards import (
+ accuracy_reward,
+ format_reward,
+ get_code_format_reward,
+ get_cosine_scaled_reward,
+ get_repetition_penalty_reward,
+ get_reward_funcs,
+ get_soft_overlong_punishment,
+ len_reward,
+ reasoning_steps_reward,
+ tag_count_reward,
+)
+
+
+load_dotenv()
+
+
+class TestGetRewardFuncs(unittest.TestCase):
+ def test_get_reward_funcs(self):
+ """Test get_reward_funcs with various reward functions."""
+ reward_names = [
+ "accuracy",
+ "format",
+ "reasoning_steps",
+ "cosine",
+ "repetition_penalty",
+ "length",
+ "tag_count",
+ "code",
+ "ioi_code",
+ "code_format",
+ "binary_code",
+ ]
+ reward_func_names = [
+ "accuracy_reward",
+ "format_reward",
+ "reasoning_steps_reward",
+ "cosine_scaled_reward",
+ "repetition_penalty_reward",
+ "len_reward",
+ "tag_count_reward",
+ "code_reward",
+ "ioi_code_reward",
+ "code_format_reward",
+ "binary_code_reward",
+ ]
+
+ args = GRPOScriptArguments(
+ dataset_name="dummy",
+ reward_funcs=reward_names,
+ )
+
+ reward_funcs = get_reward_funcs(args)
+ self.assertEqual(len(reward_funcs), 11)
+ for func_name, func in zip(reward_func_names, reward_funcs):
+ self.assertEqual(func_name, func.__name__)
+
+
+class TestRewards(unittest.TestCase):
+ def test_accuracy_reward_correct_answer(self):
+ """Test accuracy_reward with a correct answer."""
+ completion = [[{"content": r"\boxed{\frac{63}{400}}"}]]
+ solution = [r"\frac{63}{400}"]
+ rewards = accuracy_reward(completion, solution)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_accuracy_reward_wrong_answer(self):
+ """Test accuracy_reward with an incorrect answer."""
+ completion = [[{"content": r"\boxed{\frac{64}{400}}"}]]
+ solution = [r"\frac{63}{400}"]
+ rewards = accuracy_reward(completion, solution)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_accuracy_reward_wrong_answer_no_latex(self):
+ """Test accuracy_reward with an incorrect answer and gold solution with no latex."""
+ completion = [[{"content": r"\boxed{3}"}]]
+ solution = ["6"]
+ rewards = accuracy_reward(completion, solution)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_format_reward_correct(self):
+ """Test format_reward with correct format."""
+        completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]]
+ rewards = format_reward(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_format_reward_incorrect(self):
+ """Test format_reward with incorrect format."""
+ incorrect_formats = [
+            "<think>Only thinking</think>",
+            "<answer>Only answer</answer>",
+            "No tags at all",
+            "<think>Missing closing</think><answer>Missing closing",
+            "<answer>Wrong order</answer><think>Wrong order</think>",
+ ]
+
+ for fmt in incorrect_formats:
+ completion = [[{"content": fmt}]]
+ rewards = format_reward(completion)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_reasoning_steps_reward(self):
+ """Test reasoning_steps_reward with various formats."""
+ test_cases = [
+ # Full credit cases (3 or more steps)
+ ("Step 1: First step\nStep 2: Second step\nStep 3: Third step", 1.0),
+ ("First, we do this.\nSecond, we do that.\nFinally, we conclude.", 1.0),
+ # Partial credit cases (less than 3 steps)
+ ("Step 1: Only step", 1 / 3),
+ ("First, we do this.\nFinally, we conclude.", 2 / 3),
+ # No credit case
+ ("Just plain text without any clear steps", 0.0),
+ ]
+
+ for content, expected_reward in test_cases:
+ completion = [[{"content": content}]]
+ rewards = reasoning_steps_reward(completion)
+ self.assertAlmostEqual(rewards[0], expected_reward)
+
+ def test_multiple_completions(self):
+ """Test handling multiple completions at once."""
+ completions = [
+ [{"content": r"\boxed{\frac{63}{400}}"}],
+ [{"content": r"\boxed{\frac{64}{400}}"}],
+ ]
+ solutions = [r"\frac{63}{400}", r"\frac{63}{400}"]
+
+ rewards = accuracy_reward(completions, solutions)
+ self.assertEqual(len(rewards), 2)
+ self.assertEqual(rewards[0], 1.0)
+ self.assertEqual(rewards[1], 0.0)
+
+ def test_cosine_scaled_reward(self):
+ """Test cosine_scaled_reward with various cases."""
+ # Test parameters
+ test_params = {
+ "min_value_wrong": -1.0,
+ "max_value_wrong": -0.5,
+ "min_value_correct": 0.5,
+ "max_value_correct": 1.0,
+ "max_len": 100,
+ }
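+        # The reward follows a cosine schedule between the bounds above as completion length approaches
+        # max_len: short correct answers score near max_value_correct, while long wrong answers are
+        # penalized least, as encoded in the expected values below.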
+
+ test_cases = [
+ # Correct answers with different lengths
+ (
+ r"\boxed{\frac{63}{400}}",
+ r"\frac{63}{400}",
+ 20,
+ 0.943,
+ ), # Short correct answer
+ (
+ r"\boxed{\frac{63}{400}}",
+ r"\frac{63}{400}",
+ 80,
+ 0.547,
+ ), # Long correct answer
+ # Wrong answers with different lengths
+ (
+ r"\boxed{\frac{64}{400}}",
+ r"\frac{63}{400}",
+ 20,
+ -0.942,
+ ), # Short wrong answer
+ (
+ r"\boxed{\frac{64}{400}}",
+ r"\frac{63}{400}",
+ 80,
+ -0.547,
+ ), # Long wrong answer
+ ]
+
+ for content, solution, content_len, expected_reward in test_cases:
+ # Pad content to desired length
+ padded_content = content + " " * (content_len - len(content))
+ completion = [[{"content": padded_content}]]
+
+ rewards = get_cosine_scaled_reward(**test_params)(completion, [solution])
+ self.assertAlmostEqual(rewards[0], expected_reward, places=2)
+
+ def test_format_reward_specific_multiline(self):
+ """Test format_reward with a specific multiline input."""
+        inputs = "<think>\nI will count each distinct object in the image:\n1. Purple scooter\n2. Red bicycle\n3. Green motorcycle\n4. Gray sedan\n5. Yellow school bus\n6. Small green double-decker bus\n7. Small red car\n8. Small purple car\n9. Small gray dirt bike\n\nThere are 9 distinct objects in total.\n</think>\n<answer>\n9\n</answer>"
+ completion = [[{"content": inputs}]]
+ rewards = format_reward(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_same_length_responses(self):
+ """Test len_reward when all responses have the same length."""
+ completions = [
+ [{"content": r"\boxed{\frac{63}{400}}"}],
+ [{"content": r"\boxed{\frac{64}{400}}"}],
+ ]
+ solutions = [r"\frac{63}{400}", r"\frac{63}{400}"]
+
+ rewards = len_reward(completions, solutions)
+ self.assertEqual(rewards, [0.0, 0.0])
+
+ def test_different_lengths_correct_answers(self):
+ """Test len_reward with different length correct answers."""
+ completions = [
+ [{"content": r"\boxed{\frac{63}{400}}"}], # shorter
+ [{"content": r"\boxed{\frac{63}{400}} " + "x" * 10}], # longer
+ ]
+ solutions = [r"\frac{63}{400}", r"\frac{63}{400}"]
+
+ rewards = len_reward(completions, solutions)
+ self.assertGreater(rewards[0], rewards[1]) # shorter answer should get higher reward
+ self.assertAlmostEqual(rewards[0], 0.5) # shortest correct answer gets maximum reward
+
+ def test_different_lengths_incorrect_answers(self):
+ """Test len_reward with different length incorrect answers."""
+ completions = [
+ [{"content": r"\boxed{\frac{64}{400}}"}], # shorter
+ [{"content": r"\boxed{\frac{64}{400}} " + "x" * 10}], # longer
+ ]
+ solutions = [r"\frac{63}{400}", r"\frac{63}{400}"]
+
+ rewards = len_reward(completions, solutions)
+ self.assertLessEqual(rewards[0], 0.0) # incorrect answers should get non-positive rewards
+ self.assertLessEqual(rewards[1], 0.0)
+ self.assertGreater(rewards[0], rewards[1]) # shorter answer should still be penalized less
+
+ def test_mixed_correctness(self):
+ """Test len_reward with mix of correct and incorrect answers of different lengths."""
+ completions = [
+ [{"content": r"\boxed{\frac{63}{400}}"}], # correct, shorter
+ [{"content": r"\boxed{\frac{63}{400}} " + "x" * 10}], # correct, longer
+ [{"content": r"\boxed{\frac{64}{400}}"}], # incorrect, shorter
+ [{"content": r"\boxed{\frac{64}{400}} " + "x" * 10}], # incorrect, longer
+ ]
+ solutions = [r"\frac{63}{400}"] * 4
+
+ rewards = len_reward(completions, solutions)
+
+ # Shortest correct answer should get positive reward
+ self.assertGreater(rewards[0], 0.0)
+
+ # Longer correct answer might get negative reward:
+ self.assertGreater(rewards[2], rewards[1])
+ self.assertGreaterEqual(rewards[1], rewards[3])
+
+ # Incorrect answers should get non-positive rewards
+ self.assertLessEqual(rewards[2], 0.0)
+ self.assertLessEqual(rewards[3], 0.0)
+
+ # Shorter answers should get better rewards within their correctness category
+ self.assertGreater(rewards[0], rewards[1]) # correct answers
+ self.assertGreater(rewards[2], rewards[3]) # incorrect answers
+
+ def test_unparseable_solution(self):
+ """Test len_reward with unparseable solution."""
+ completions = [
+ [{"content": r"\boxed{answer}"}],
+ [{"content": r"\boxed{answer} " + "x" * 10}],
+ ]
+ solutions = ["unparseable_latex", "unparseable_latex"]
+
+ rewards = len_reward(completions, solutions)
+ self.assertGreater(rewards[0], rewards[1]) # shorter answer should still get better reward
+ self.assertAlmostEqual(rewards[0], 0.5) # treated as correct, shortest gets maximum reward
+
+
+class TestRepetitionPenaltyReward(unittest.TestCase):
+ def test_positive_max_penalty_raises_value_error(self):
+ with self.assertRaises(ValueError):
+ get_repetition_penalty_reward(ngram_size=2, max_penalty=1.0)
+ with self.assertRaisesRegex(ValueError, "max_penalty 1.5 should not be positive"):
+ get_repetition_penalty_reward(ngram_size=2, max_penalty=1.5)
+
+ def test_no_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0)
+ completions = [[{"content": "this is a test sentence"}]]
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_full_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0)
+ completions = [[{"content": "this this this this this"}]]
+
+ rewards = reward_fn(completions)
+ # (1 - 1/4) * -1 = -0.75
+ self.assertEqual(rewards, [-0.75])
+
+ def test_partial_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0)
+ completions = [[{"content": "this is a this is a test"}]]
+
+ rewards = reward_fn(completions)
+ # Unique 2-grams: (this, is), (is, a), (a, this), (a, test). 4 unique out of 6 total
+ # (1 - 4/6) * -1 = -1/3 = -0.3333...
+ self.assertAlmostEqual(rewards[0], -1 / 3)
+
+ def test_multiple_completions(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-0.5)
+ completions = [
+ [{"content": "this is a test"}],
+ [{"content": "test test test test"}],
+ ]
+
+ rewards = reward_fn(completions)
+ # Completion 1: (this, is, a), (is, a, test) -> 2 unique / 2 total -> (1 - 2/2) * -0.5 = 0
+ # Completion 2: (test, test, test) -> 1 unique / 2 total -> (1 - 1/2) * -0.5 = -0.25
+ self.assertAlmostEqual(rewards[0], 0.0)
+ self.assertAlmostEqual(rewards[1], -0.25)
+
+ def test_empty_completion(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0)
+ completions = [[{"content": ""}]]
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_different_ngram_size(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-2.0)
+ completions = [[{"content": "this is a this is a test"}]]
+
+ rewards = reward_fn(completions)
+ self.assertAlmostEqual(rewards[0], -0.4)
+
+ def test_mixed_case(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0)
+ completions = [
+ [{"content": "This is A Test"}],
+ [{"content": "this IS a test"}],
+ ]
+
+ rewards = reward_fn(completions)
+ # both completions should produce the same reward, because the text gets lowercased
+ self.assertAlmostEqual(rewards[0], rewards[1])
+
+ def test_one_word_completion(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "word"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_two_word_completion(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "two words"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_three_word_completion(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "three different words"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_three_word_repetition_completion(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "word word word word"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [-0.5])
+
+ def test_four_word_completion_with_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "one two one two"}]]
+
+ rewards = reward_fn(completions)
+        # 3-grams: (one two one), (two one two) -> 2 unique out of 2 total -> (1 - 2/2) * -1 = 0
+ self.assertEqual(rewards, [0.0])
+
+ def test_five_word_completion_with_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-0.5)
+ completions = [[{"content": "A B C A B"}]]
+
+ rewards = reward_fn(completions)
+        # 3-grams: (A B C), (B C A), (C A B) -> 3 unique out of 3 total -> (1 - 3/3) * -0.5 = 0
+ self.assertEqual(rewards, [0.0])
+
+ def test_six_word_completion_with_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "A B C A B C"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [-0.25])
+
+ def test_long_completion_with_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "A B C A B C E F G A B C A B C"}]]
+ rewards = reward_fn(completions)
+ self.assertAlmostEqual(rewards[0], -0.3846, places=4)
+
+ def test_long_completion_without_repetition(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=3, max_penalty=-1.0)
+ completions = [[{"content": "A B C D E F G H I J K L"}]]
+
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [0.0])
+
+ def test_tag_count_rewards_all_correct(self):
+ """Test tag_count_reward with correct tags."""
+        completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_tag_count_rewards_missing_think_begin(self):
+        """Test tag_count_reward with missing <think> tag."""
+        completion = [[{"content": "Some reasoning\n</think>\n<answer>\nThe answer\n</answer>"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 0.75)
+
+ def test_tag_count_rewards_missing_think_end(self):
+        """Test tag_count_reward with missing </think> tag."""
+        completion = [[{"content": "<think>\nSome reasoning\n<answer>\nThe answer\n</answer>"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 0.75)
+
+ def test_tag_count_rewards_missing_answer_begin(self):
+        """Test tag_count_reward with missing <answer> tag."""
+        completion = [[{"content": "<think>\nSome reasoning\n</think>\nThe answer\n</answer>"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 0.75)
+
+ def test_tag_count_rewards_missing_answer_end(self):
+        """Test tag_count_reward with missing </answer> tag."""
+        completion = [[{"content": "<think>\nSome reasoning\n</think>\n<answer>\nThe answer"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 0.75)
+
+ def test_tag_count_rewards_missing_all_tags(self):
+ """Test tag_count_reward with missing all tags."""
+ completion = [[{"content": "Some reasoning\nThe answer"}]]
+ rewards = tag_count_reward(completion)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_full_repetition_with_language(self):
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0, language="en")
+ completions = [[{"content": "that that that that that"}]]
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [-0.75])
+ # begin test for zh language
+ reward_fn = get_repetition_penalty_reward(ngram_size=2, max_penalty=-1.0, language="zh")
+ completions = [[{"content": "这个这个这个这个这个"}]]
+ rewards = reward_fn(completions)
+ self.assertEqual(rewards, [-0.75])
+
+ def test_soft_overlong_punishment_short_completion(self):
+ """Test soft overlong punishment reward function with a short completion."""
+ # length 50, with max=100 and soft cache=20, reward should be 0.
+ reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20)
+ completion_ids = [[1] * 50] # 50 <= 80
+ rewards = reward_fn(completion_ids=completion_ids)
+ self.assertEqual(rewards, [0])
+
+ def test_soft_overlong_punishment_long_completion(self):
+ """Test soft overlong punishment reward function with a longer than max completion."""
+ # 110 > 100, reward should be -1.
+ reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20)
+ completion_ids = [[1] * 110]
+ rewards = reward_fn(completion_ids)
+ self.assertEqual(rewards, [-1])
+
+ def test_soft_overlong_punishment_intermediate_completion(self):
+ """Test soft overlong punishment reward function for intermediate length completion."""
+ reward_fn = get_soft_overlong_punishment(max_completion_len=100, soft_punish_cache=20)
+ completion_ids = [[1] * 90] # 90 is between 80 and 100
+ rewards = reward_fn(completion_ids)
+ self.assertAlmostEqual(rewards[0], -0.5, places=4)
+
+
+class TestCodeFormat(unittest.TestCase):
+ def test_correct_python_format(self):
+ """Test code format reward with correct Python format."""
+ completion = [
+ [
+ {
+                    "content": "<think>\nLet's solve this\nStep 1: First step\n</think>\n<answer>\n```python\ndef hello():\n    print('world')\n```\n</answer>"
+ }
+ ]
+ ]
+ reward_fn = get_code_format_reward(language="python")
+ rewards = reward_fn(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_incorrect_formats(self):
+ """Test code format reward with various incorrect formats."""
+ incorrect_formats = [
+ # Missing think/answer tags
+ "```python\ndef hello():\n print('world')\n```",
+ # Missing code block
+            "<think>Some thinking</think><answer>Just plain text</answer>",
+            # Wrong language
+            "<think>Analysis</think><answer>```javascript\nconsole.log('hello');\n```</answer>",
+            # Missing language identifier
+            "<think>Analysis</think><answer>```\ndef hello(): pass\n```</answer>",
+            # Wrong order of tags
+            "<answer>```python\ndef hello(): pass\n```</answer><think>Analysis</think>",
+ ]
+
+ reward_fn = get_code_format_reward(language="python")
+ for fmt in incorrect_formats:
+ completion = [[{"content": fmt}]]
+ rewards = reward_fn(completion)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_multiple_code_blocks(self):
+ """Test format reward with multiple code blocks in think and answer sections."""
+ completion = [
+ [
+ {
+                    "content": "<think>\nHere's an example:\n```python\nx = 1\n```\nNow the solution:\n</think>\n<answer>\n```python\ndef solution():\n    return 42\n```\n</answer>"
+ }
+ ]
+ ]
+ reward_fn = get_code_format_reward(language="python")
+ rewards = reward_fn(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ def test_different_languages(self):
+ """Test code format reward with different programming languages."""
+ completion = [
+ [
+ {
+                    "content": "<think>\nAnalysis\n</think>\n<answer>\n```javascript\nconsole.log('hello');\n```\n</answer>"
+ }
+ ]
+ ]
+
+ # Test with JavaScript
+ js_reward_fn = get_code_format_reward(language="javascript")
+ rewards = js_reward_fn(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+ # Same completion should fail for Python
+ py_reward_fn = get_code_format_reward(language="python")
+ rewards = py_reward_fn(completion)
+ self.assertEqual(rewards[0], 0.0)
+
+ def test_multiline_code(self):
+ """Test format reward with complex multiline code blocks."""
+ completion = [
+ [
+ {
+                    "content": "<think>\nHere's the analysis\n</think>\n<answer>\n```python\nclass Solution:\n    def __init__(self):\n        self.value = 42\n    \n    def get_value(self):\n        return self.value\n```\n</answer>"
+ }
+ ]
+ ]
+ reward_fn = get_code_format_reward(language="python")
+ rewards = reward_fn(completion)
+ self.assertEqual(rewards[0], 1.0)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/utils/test_data.py b/tests/utils/test_data.py
new file mode 100644
index 000000000..669057e78
--- /dev/null
+++ b/tests/utils/test_data.py
@@ -0,0 +1,129 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+from dataclasses import asdict
+
+from datasets import DatasetDict, load_dataset
+
+from open_r1.configs import DatasetConfig, DatasetMixtureConfig, ScriptArguments
+from open_r1.utils.data import get_dataset
+
+
+class TestGetDataset(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.dataset_name = "trl-internal-testing/zen"
+ cls.dataset_config = "conversational_preference"
+ cls.ref_dataset = load_dataset(cls.dataset_name, cls.dataset_config)
+
+ def test_dataset_and_config_name(self):
+ args = ScriptArguments(dataset_name=self.dataset_name, dataset_config=self.dataset_config)
+ dataset = get_dataset(args)
+ self.assertIsInstance(dataset, DatasetDict)
+ self.assertIn("train", dataset)
+ self.assertEqual(len(dataset["train"]), len(self.ref_dataset["train"]))
+
+ def test_unweighted_mixture(self):
+ """Mix train and test splits of the same dataset."""
+ dataset_configs = [
+ DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="train", columns=None, weight=None),
+ DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="test", columns=None, weight=None),
+ ]
+ dataset_mixture = DatasetMixtureConfig(
+ datasets=dataset_configs,
+ )
+ args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
+ dataset = get_dataset(args)
+ self.assertIsInstance(dataset, DatasetDict)
+ self.assertIn("train", dataset)
+ self.assertEqual(len(dataset["train"]), len(self.ref_dataset["train"]) + len(self.ref_dataset["test"]))
+
+ def test_weighted_mixture(self):
+ """Test loading a dataset mixture with weights."""
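+        # Weights of 0.25 and 0.5 keep roughly a quarter of the train split and half of the test split,
+        # which is what the size assertion below checks.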
+ dataset_configs = [
+ DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="train", columns=None, weight=0.25),
+ DatasetConfig(id=self.dataset_name, config=self.dataset_config, split="test", columns=None, weight=0.5),
+ ]
+ dataset_mixture = DatasetMixtureConfig(
+ datasets=dataset_configs,
+ )
+ args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
+ dataset = get_dataset(args)
+ self.assertIsInstance(dataset, DatasetDict)
+ self.assertIn("train", dataset)
+ self.assertEqual(
+ len(dataset["train"]), len(self.ref_dataset["train"]) // 4 + len(self.ref_dataset["test"]) // 2
+ )
+
+ def test_mixture_and_test_split(self):
+ """Test loading a dataset mixture with test split."""
+ dataset_configs = [
+ DatasetConfig(
+ id=self.dataset_name, config=self.dataset_config, split="train[:10]", columns=None, weight=None
+ ),
+ ]
+ dataset_mixture = DatasetMixtureConfig(datasets=dataset_configs, test_split_size=0.2)
+ args = ScriptArguments(dataset_name=None, dataset_mixture=asdict(dataset_mixture))
+ dataset = get_dataset(args)
+ self.assertIsInstance(dataset, DatasetDict)
+ self.assertIn("train", dataset)
+ self.assertIn("test", dataset)
+ self.assertEqual(len(dataset["train"]), 8)
+ self.assertEqual(len(dataset["test"]), 2)
+
+ def test_mixture_column_selection(self):
+ """Test loading a dataset mixture with column selection."""
+ dataset_configs = [
+ DatasetConfig(
+ id=self.dataset_name,
+ config=self.dataset_config,
+ split="train",
+ columns=["prompt", "chosen"],
+ weight=None,
+ ),
+ ]
+ dataset_mixture = DatasetMixtureConfig(
+ datasets=dataset_configs,
+ )
+ args = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
+ dataset = get_dataset(args)
+ self.assertIsInstance(dataset, DatasetDict)
+ self.assertIn("train", dataset)
+ self.assertIn("prompt", dataset["train"].column_names)
+ self.assertIn("chosen", dataset["train"].column_names)
+
+ def test_mixture_with_mismatched_columns(self):
+ dataset_configs = [
+ DatasetConfig(
+ id=self.dataset_name, config=self.dataset_config, split="train", columns=["prompt"], weight=None
+ ),
+ DatasetConfig(
+ id=self.dataset_name, config=self.dataset_config, split="train", columns=["chosen"], weight=None
+ ),
+ ]
+ dataset_mixture = DatasetMixtureConfig(
+ datasets=dataset_configs,
+ )
+ with self.assertRaises(ValueError) as context:
+ _ = ScriptArguments(dataset_mixture=asdict(dataset_mixture))
+ self.assertIn("Column names must be consistent", str(context.exception))
+
+ def test_no_dataset_name_or_mixture(self):
+ with self.assertRaises(ValueError) as context:
+ _ = ScriptArguments(dataset_name=None, dataset_mixture=None)
+ self.assertIn("Either `dataset_name` or `dataset_mixture` must be provided", str(context.exception))
+
+
+if __name__ == "__main__":
+ unittest.main()