import os

import torch
from torch import distributed as dist
from transformers import GptOssForCausalLM, PreTrainedTokenizerFast


def initialize_process():
    # torchrun sets LOCAL_RANK for every process; pin this process to its GPU
    # before creating the NCCL process group.
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")


def run_inference():
    model_id = "openai/gpt-oss-120b"
    tok = PreTrainedTokenizerFast.from_pretrained(model_id)

    # tp_plan="auto" shards the weights across every rank in the process group
    # (tensor parallelism); torch_dtype="auto" keeps the checkpoint's dtype.
    model = GptOssForCausalLM.from_pretrained(
        model_id,
        tp_plan="auto",
        torch_dtype="auto",
    ).eval()

    messages = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Explain KV caching briefly."},
    ]
    # reasoning_effort ("low" / "medium" / "high") is forwarded to the
    # gpt-oss chat template.
    inputs = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
        return_dict=True,
        reasoning_effort="low",
    )

    # Move the prompt tensors onto this rank's GPU.
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    inputs = {k: v.to(device, non_blocking=True) for k, v in inputs.items()}

    # Every rank runs the same generation call; the tensor-parallel collectives
    # happen inside the model's forward pass.
    with torch.inference_mode():
        out = model.generate(**inputs, max_new_tokens=128)
    torch.cuda.synchronize(device)

    # Wait for all ranks to finish, then print the completion (with the prompt
    # tokens sliced off) from rank 0 only.
    dist.barrier()
    if dist.get_rank() == 0:
        print(tok.decode(out[0][inputs["input_ids"].shape[-1]:]))


def main():
    initialize_process()
    try:
        run_inference()
    finally:
        dist.destroy_process_group()


if __name__ == "__main__":
    main()
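
# Example launch command (the script name is a placeholder; set
# --nproc_per_node to the number of GPUs you want to shard the model across,
# so that torchrun provides LOCAL_RANK/RANK/WORLD_SIZE to every process):
#   torchrun --nproc_per_node=4 tp_generate.py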