-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy patheval_aime25.py
More file actions
123 lines (90 loc) · 3.87 KB
/
eval_aime25.py
File metadata and controls
123 lines (90 loc) · 3.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
from transformers import AutoModelForCausalLM, AutoTokenizer
import json
from datasets import load_dataset
from dataset import extract_answer_llm
from vllm import LLM, SamplingParams
from dataset import extract_answer_llm, extract_answer_qwq, is_float
def main():
    """Evaluate the Zigeng/R1-VeriThinker-7B model on the AIME 2025 benchmark.

    Loads the 30-problem AIME-2025 test split, repeats it 8x (240 samples)
    so accuracy is averaged over 8 sampled generations per problem, generates
    answers in batches with vLLM, scores them against the ground truth by
    integer comparison, and writes all per-sample records to
    ``test_aime25.json``.

    Side effects: downloads model/dataset, prints progress, writes a JSON file.
    """
    test_examples = load_dataset("math-ai/aime25", split="test")
    test_examples = list(test_examples)
    # Repeat the test set 8x: each problem is sampled 8 times (avg@8 accuracy).
    test_examples = test_examples * 8
    seed = 42
    model_name = "Zigeng/R1-VeriThinker-7B"
    llm = LLM(model=model_name, tensor_parallel_size=4, max_model_len=40000)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    right = 0     # number of accepted (correct) samples so far
    tokens = 0    # total generated tokens across all samples
    answers = []  # per-sample records dumped to JSON at the end
    batch_size = 30

    for i in range(0, len(test_examples), batch_size):
        # Vary the seed per batch so the repeated copies of each problem
        # produce different samples instead of 8 identical generations.
        sampling_params = SamplingParams(
            temperature=0.6,
            top_p=0.95,
            max_tokens=16384,
            seed=seed + i,
            stop=["\n</think>"],
        )
        # Python slicing already clamps at the sequence end; no min() needed.
        batch_examples = test_examples[i:i + batch_size]
        batch_prompts = []
        batch_gt_answers = []

        # Prepare chat-formatted prompts for the batch.
        for example in batch_examples:
            prompt = example["problem"]
            # DeepSeek-R1 style instruction tail.
            tail = r" Please reason step by step, and put your final answer within \boxed{}."
            messages = [
                {"role": "user", "content": prompt + tail}
            ]
            text = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
            batch_prompts.append(text)
            batch_gt_answers.append(example["answer"])

        # Generate responses for the whole batch at once.
        outputs = llm.generate(batch_prompts, sampling_params)

        # Score each response in the batch.
        for j, output in enumerate(outputs):
            response = output.outputs[0].text
            num_new_tokens = len(tokenizer.encode(response))
            gt_answer = batch_gt_answers[j]
            # Prefer the QwQ-style extractor; fall back to the generic
            # extractor when it does not yield a numeric answer.
            # (Original code had a no-op `llm_answer = llm_answer` branch here.)
            llm_answer = extract_answer_qwq(response)
            if not is_float(llm_answer):
                llm_answer = extract_answer_llm(response)
            print(gt_answer, "||", llm_answer)

            # AIME answers are integers; compare after rounding both sides.
            if is_float(gt_answer) and is_float(llm_answer):
                try:
                    accept = (int(round(float(gt_answer))) == int(round(float(llm_answer))))
                except (OverflowError, ValueError):
                    # int(float('inf')) raises OverflowError;
                    # int(float('nan')) raises ValueError — treat both as wrong
                    # instead of crashing the run.
                    accept = False
            else:
                accept = False
            if accept:
                right += 1

            answers.append({
                "question": batch_examples[j]["problem"],
                "gt_answer": gt_answer,
                "llm_answer": llm_answer,
                "accept": accept,
                "llm_response": response,
                "tokens": num_new_tokens,
            })
            # Update token count with actual new tokens
            tokens += num_new_tokens
            # Print progress for each example
            current_idx = i + j + 1
            print("sample num:", current_idx, "result:", accept, "accuracy:", right/current_idx)
            print("gt_answer:", gt_answer, "llm_answer:", llm_answer)
            print('tokens:', num_new_tokens)
            print('-' * 50)

    # Calculate final metrics
    avg_tokens = tokens / len(test_examples)
    ratio = right / len(test_examples)
    print("#############################################AIME2025#############################################")
    print("num of samples:", len(test_examples))
    print("avg tokens:", avg_tokens)
    print("avg accuracy:", ratio)

    # Save all per-sample records (not only wrong answers) to JSON.
    with open("test_aime25.json", "w", encoding="utf-8") as f:
        json.dump(answers, f, ensure_ascii=False, indent=2)
# Script entry point: run the AIME-2025 evaluation when executed directly.
if __name__ == "__main__":
    main()