diff --git a/eval/benchs/__init__.py b/eval/benchs/__init__.py index d0229db..362ac3b 100644 --- a/eval/benchs/__init__.py +++ b/eval/benchs/__init__.py @@ -1,4 +1,5 @@ from .base_evaluator import BaseEvaluator +from .ceval.eval_ceval import CEvalEvaluator from .exampleqa.eval_exampleqa import ExampleQAEvaluator from .halluqa.eval_halluqa_mc import HalluQAMCEvaluator from .halueval.eval_halueval_dialog import HaluEvalDialogEvaluator @@ -11,6 +12,8 @@ # ! Register all evaluators here in alphabetical order. __all__ = [ + # CEval + "CEvalEvaluator", # ExampleQA "ExampleQAEvaluator", # HalluQA diff --git a/eval/benchs/ceval/README.md b/eval/benchs/ceval/README.md new file mode 100644 index 0000000..b1155db --- /dev/null +++ b/eval/benchs/ceval/README.md @@ -0,0 +1,34 @@ +# C-Eval + +## Information + +- **Paper**: C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models +- **Institution**: + - Shanghai Jiao Tong University + - Tsinghua University + - University of Edinburgh + - Hong Kong University of Science and Technology +- **arXiv**: https://arxiv.org/abs/2305.08322 +- **GitHub**: https://github.com/hkust-nlp/ceval +- **Website**: https://cevalbenchmark.com/ + +## Evaluators + +| Evaluator | Metric | Description | +| ---------------- | -------- | ----------------- | +| `CEvalEvaluator` | Accuracy | Multi-choice task | + +## Note + +Make sure you can **access Hugging Face** so that the dataset can be downloaded. 
class CEvalDataset(BaseDataset):
    """C-Eval multi-choice exam dataset, loaded from the Hugging Face Hub.

    Each loaded item is the raw dataset row with two fields added here:
    ``id`` (globally unique, e.g. ``"logic_val_0001"``) and ``type``
    (the discipline key).
    """

    def __init__(
        self,
        disciplines: set[str] | None = None,
        split: Literal["test", "val", "dev"] = "val",
    ):
        """
        Args:
            disciplines: Disciplines to load. If None, all disciplines will be loaded.
            split: The split to load. One of "test", "val", "dev".
        """
        subject_mapping = get_subject_mapping()
        self.data = []
        if disciplines is None:
            disciplines = set(subject_mapping.keys())

        for discipline in tqdm(disciplines, desc=f"Loading CEval > {split}"):
            ds = load_dataset("ceval/ceval-exam", discipline, split=split)
            for item in ds:
                # Build a globally unique id; the numeric part is
                # zero-padded to width 4 ("0001", "0012", ...).
                item["id"] = f"{discipline}_{split}_{item['id']:>04}"
                item["type"] = discipline
                self.data.append(item)

    def load(self) -> list[dict]:
        """Return all loaded items as a flat list."""
        return self.data

    def load_as_dict_of_discipline(self, num_shots: int) -> dict[str, list[dict]]:
        """Group items by discipline, keeping at most ``num_shots`` per key.

        Used to build the few-shot example pool for each discipline.

        Args:
            num_shots: Maximum number of examples kept per discipline.

        Returns:
            Mapping of discipline key to its first ``num_shots`` items.
        """
        examples = defaultdict(list)
        for item in self.data:
            if len(examples[item["type"]]) < num_shots:
                examples[item["type"]].append(item)
        return examples
# One question/answer unit; the answer line is left empty-ended for the
# item under test (the trailing answer character is stripped in scoring()).
QA_TEMPLATE = """
{question}
A. {choice_a}
B. {choice_b}
C. {choice_c}
D. {choice_d}
答案:{answer}
"""

# Full prompt: task instruction + few-shot examples + the test question.
PROMPT_TEMPLATE = """以下是中国关于{discipline}考试的单项选择题,请选出其中的正确答案。
{qa_examples}
{qa_test}"""


# Default subset: the "C-Eval Hard" disciplines (math/physics/chemistry heavy).
CEVAL_HARD_DISCIPLINES = ",".join(
    [
        "advanced_mathematics",
        "discrete_mathematics",
        "probability_and_statistics",
        "college_chemistry",
        "college_physics",
        "high_school_mathematics",
        "high_school_chemistry",
        "high_school_physics",
    ]
)


class CEvalEvaluator(BaseEvaluator):
    """Accuracy evaluator for the C-Eval multi-choice benchmark."""

    def __init__(
        self,
        model: BaseLLM,
        num_batches: int = 1,
        output_dir: str = "./output",
        disciplines: str = CEVAL_HARD_DISCIPLINES,
        split: Literal["test", "val", "dev"] = "val",
        num_shots: int = 2,
    ):
        """
        Args:
            model: The LLM under evaluation.
            num_batches: Number of batches to split the dataset into.
            output_dir: Directory for evaluation outputs.
            disciplines: Comma-separated discipline keys; unknown keys are
                silently dropped. If None, all disciplines are used.
            split: Dataset split to evaluate on.
            num_shots: Few-shot examples per discipline (drawn from "dev").
        """
        super().__init__(
            model,
            num_batches,
            output_dir,
            disciplines=disciplines,
            split=split,
            num_shots=num_shots,
        )

        self.split = split

        # ─── Get Valid Disciplines ──────────────────────────────────

        self.all_disciplines = set(get_subject_mapping().keys())
        if disciplines is None:
            self.disciplines = self.all_disciplines
        else:
            # Intersect with the known keys so typos cannot crash loading.
            self.disciplines = set(disciplines.split(",")) & self.all_disciplines

        # ─── Load Examples For Few-shot Learning ────────────────────

        if num_shots > 0:
            ds = CEvalDataset(self.disciplines, split="dev")
            self.discipline_examples = ds.load_as_dict_of_discipline(num_shots)
        else:
            self.discipline_examples = {}

    def set_generation_configs(self) -> None:
        """Use short, greedy generation: only an option letter is needed."""
        new_configs = {"max_new_tokens": 16, "do_sample": False}
        self.model.update_generation_configs(new_configs)

    def load_batched_dataset(self) -> list[list[dict]]:
        """Load the selected disciplines/split and split into batches."""
        dataset = CEvalDataset(self.disciplines, split=self.split)
        batches = dataset.to_batched(self.num_batches)
        return batches

    def qa_prompt(self, examples: list[dict]) -> str:
        """Render a list of items through QA_TEMPLATE and concatenate them."""
        prompt = "".join(
            QA_TEMPLATE.format(
                question=example["question"],
                choice_a=example["A"],
                choice_b=example["B"],
                choice_c=example["C"],
                choice_d=example["D"],
                answer=example["answer"],
            )
            for example in examples
        )
        return prompt

    def scoring(self, data_point: dict) -> dict:
        """Query the model on one item and score the first-line answer.

        Returns:
            Dict with ``metrics`` (exact-match ``correct``), ``log``
            (query/response/answer), and ``valid`` (non-empty answer).
        """
        discipline = data_point["type"]
        query = PROMPT_TEMPLATE.format(
            discipline=get_subject_mapping()[discipline][1],  # Get the Chinese name
            qa_examples=self.qa_prompt(self.discipline_examples[discipline]),
            qa_test=self.qa_prompt([data_point]),
        )
        # The test item is rendered with its answer; strip the final answer
        # character so the model has to predict it.
        query = query.strip()[:-1]
        response = self.model.safe_request(query)
        answer = response.strip().split("\n")[0].strip()  # Get the first line
        return {
            "metrics": {
                "correct": answer == data_point["answer"],
            },
            "log": {
                "answer": answer,
                "response": response,
                "query": query,
            },
            "valid": answer != "",
        }

    def compute_overall(self, results: list[dict]) -> dict:
        """Aggregate per-item results into overall accuracy.

        Guards against an empty result list (previously raised
        ZeroDivisionError); accuracy is reported as 0.0 in that case.
        """
        num = len(results)
        correct = sum(result["metrics"]["correct"] for result in results)
        return {
            "accuracy": correct / num if num else 0.0,
            "num": num,
        }
+ "High School Physics", + "高中物理", + "STEM" + ], + "high_school_chemistry": [ + "High School Chemistry", + "高中化学", + "STEM" + ], + "high_school_biology": [ + "High School Biology", + "高中生物", + "STEM" + ], + "middle_school_mathematics": [ + "Middle School Mathematics", + "初中数学", + "STEM" + ], + "middle_school_biology": [ + "Middle School Biology", + "初中生物", + "STEM" + ], + "middle_school_physics": [ + "Middle School Physics", + "初中物理", + "STEM" + ], + "middle_school_chemistry": [ + "Middle School Chemistry", + "初中化学", + "STEM" + ], + "veterinary_medicine": [ + "Veterinary Medicine", + "兽医学", + "STEM" + ], + "college_economics": [ + "College Economics", + "大学经济学", + "Social Science" + ], + "business_administration": [ + "Business Administration", + "工商管理", + "Social Science" + ], + "marxism": [ + "Marxism", + "马克思主义基本原理", + "Social Science" + ], + "mao_zedong_thought": [ + "Mao Zedong Thought", + "毛泽东思想和中国特色社会主义理论体系概论", + "Social Science" + ], + "education_science": [ + "Education Science", + "教育学", + "Social Science" + ], + "teacher_qualification": [ + "Teacher Qualification", + "教师资格", + "Social Science" + ], + "high_school_politics": [ + "High School Politics", + "高中政治", + "Social Science" + ], + "high_school_geography": [ + "High School Geography", + "高中地理", + "Social Science" + ], + "middle_school_politics": [ + "Middle School Politics", + "初中政治", + "Social Science" + ], + "middle_school_geography": [ + "Middle School Geography", + "初中地理", + "Social Science" + ], + "modern_chinese_history": [ + "Modern Chinese History", + "近代史纲要", + "Humanities" + ], + "ideological_and_moral_cultivation": [ + "Ideological and Moral Cultivation", + "思想道德修养与法律基础", + "Humanities" + ], + "logic": [ + "Logic", + "逻辑学", + "Humanities" + ], + "law": [ + "Law", + "法学", + "Humanities" + ], + "chinese_language_and_literature": [ + "Chinese Language and Literature", + "中国语言文学", + "Humanities" + ], + "art_studies": [ + "Art Studies", + "艺术学", + "Humanities" + ], + "professional_tour_guide": 
[ + "Professional Tour Guide", + "导游资格", + "Humanities" + ], + "legal_professional": [ + "Legal Professional", + "法律职业资格", + "Humanities" + ], + "high_school_chinese": [ + "High School Chinese", + "高中语文", + "Humanities" + ], + "high_school_history": [ + "High School History", + "高中历史", + "Humanities" + ], + "middle_school_history": [ + "Middle School History", + "初中历史", + "Humanities" + ], + "civil_servant": [ + "Civil Servant", + "公务员", + "Other" + ], + "sports_science": [ + "Sports Science", + "体育学", + "Other" + ], + "plant_protection": [ + "Plant Protection", + "植物保护", + "Other" + ], + "basic_medicine": [ + "Basic Medicine", + "基础医学", + "Other" + ], + "clinical_medicine": [ + "Clinical Medicine", + "临床医学", + "Other" + ], + "urban_and_rural_planner": [ + "Urban and Rural Planner", + "注册城乡规划师", + "Other" + ], + "accountant": [ + "Accountant", + "注册会计师", + "Other" + ], + "fire_engineer": [ + "Fire Engineer", + "注册消防工程师", + "Other" + ], + "environmental_impact_assessment_engineer": [ + "Environmental Impact Assessment Engineer", + "环境影响评价工程师", + "Other" + ], + "tax_accountant": [ + "Tax Accountant", + "税务师", + "Other" + ], + "physician": [ + "Physician", + "医师资格", + "Other" + ] +} \ No newline at end of file diff --git a/eval/benchs/ceval/utils.py b/eval/benchs/ceval/utils.py new file mode 100644 index 0000000..1d1b894 --- /dev/null +++ b/eval/benchs/ceval/utils.py @@ -0,0 +1,10 @@ +import json +import os + +BASE_PATH = os.path.dirname(os.path.abspath(__file__)) + + +def get_subject_mapping(filename: str = "subject_mapping.json") -> dict: + path = os.path.join(BASE_PATH, filename) + with open(path) as f: + return json.load(f)