# benchmark-kernels-with-without.py
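# Benchmarks greedy generation for openai/gpt-oss-20b with and without
# `use_kernels=True` (community kernels from the Hugging Face Hub), sweeping
# the batch size and comparing timings with torch.utils.benchmark.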
import os

# Pin the benchmark to a single GPU; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
def load_model(use_kernels):
    # Load GPT-OSS-20B, dequantizing its MXFP4 checkpoint, either with or
    # without the Hub kernels enabled via `use_kernels`.
    model_id = "openai/gpt-oss-20b"
    quantization_config = Mxfp4Config(dequantize=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype="auto",
        device_map="cuda:0",
        use_kernels=use_kernels,
        quantization_config=quantization_config,
    ).eval()
    return model
def generate(model, model_inputs, max_new_tokens):
    # Greedy decoding; eos_token_id=-1 prevents early stopping so every call
    # generates exactly `max_new_tokens` tokens, keeping timings comparable.
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            eos_token_id=-1,
            disable_compile=True,
        )
if __name__ == "__main__":
    results = []
    max_new_tokens = 256
    base_prompts = [
        "What is Tensor Parallelism?",
        "Explain machine learning fundamentals.",
        "How do neural networks work?",
        "What are the benefits of distributed computing?",
        "Describe the attention mechanism in transformers.",
        "What is gradient descent?",
        "How does backpropagation work?",
        "Explain the concept of overfitting.",
    ]
    # The tokenizer is the same for every configuration, so load it once.
    tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
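    # Benchmark each batch size with and without the Hub kernels.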
    for use_kernels in [True, False]:
        model = load_model(use_kernels)
        for batch_size in [32, 64, 128, 256]:
            # Build a batch of chat-formatted prompts by cycling through base_prompts.
            messages = [
                [{"role": "system", "content": base_prompts[i % len(base_prompts)]}]
                for i in range(batch_size)
            ]
            texts = [
                tokenizer.apply_chat_template(
                    m, add_generation_prompt=True, tokenize=False, reasoning_effort="low"
                )
                for m in messages
            ]
            inputs = tokenizer(
                texts,
                return_tensors="pt",
                padding=True,
                padding_side="left",
            ).to("cuda:0")
            label = "time taken to generate"
            # Run generate() five times and record the mean time per run.
            results.append(
                benchmark.Timer(
                    stmt="generate(model, model_inputs, max_new_tokens)",
                    setup="from __main__ import generate",
                    globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
                    num_threads=torch.get_num_threads(),
                    label=label,
                    sub_label=f"num tokens: {max_new_tokens} batch size: {batch_size}",
                    description=f"use kernels: {use_kernels}",
                ).timeit(5)
            )
            # Free the inputs before the next batch size.
            inputs.to("cpu")
            del inputs
        # Release the model before loading the next configuration.
        model.to("cpu")
        del model
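        # Optional: clear the CUDA caching allocator so the next configuration
        # starts from freed GPU memory.
        torch.cuda.empty_cache()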
    # Print a side-by-side comparison of all measurements.
    compare = benchmark.Compare(results)
    compare.print()
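# Example output from one run; absolute timings depend on the GPU and software stack.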
# [---------------------------- time taken to generate ----------------------------]
# | use kernels: True | use kernels: False
# 12 threads: ----------------------------------------------------------------------
# num tokens: 256 batch size: 32 | 12.7 | 9.1
# num tokens: 256 batch size: 64 | 12.7 | 10.0
# num tokens: 256 batch size: 128 | 12.8 | 13.9
# num tokens: 256 batch size: 256 | 15.0 | 21.2
# Times are in seconds (s).