-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathconf_example.yml
30 lines (29 loc) · 1.39 KB
/
conf_example.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Define the probability for each category of perturbation.
# Each probability can be 0, but at least one must be greater than 0.
# The total sum of these probabilities must not exceed 1.
probabilities:  # probability of each kind of perturbation to happen (at sample level)
  wrong_groundtruth: 0
  no_correct_answer: 0
  multiple_correct_answers: 0
  bad_options_clarity: 0
  bad_questions_clarity: 0
# Here you should define the parameters of your LLM call method.
# This project supports the HF inference client and the OpenAI client.
llm:
  type: openai  # "http" for the HF inference client, "openai" for the OpenAI client
  configs:  # parameters for calling an OpenAI model
    # For these parameters refer to
    # https://github.com/openai/openai-python/blob/5a89126ad208c58b9e1dbd1fbdb698e4c00f7d8e/src/openai/_client.py#L49C7-L49C28
    # NOTE: values left blank parse as null — fill them in before running.
    completion:
      max_tokens:
      model:
      temperature:
      top_p:
      frequency_penalty:
      presence_penalty:
# Example configuration for the HF inference client (mutually exclusive with
# the "llm" block above — un-comment this one and comment that one out):
# llm:
#   type: http  # "http" for the HF inference client, "openai" for the OpenAI client
#   configs:  # parameters for the HF InferenceClient; refer to
#             # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
#     model:
#     token:
#     timeout:
#     cookies:
# true to test the perturbation on a small portion of the dataset.
# Canonical lowercase boolean (YAML 1.2 core schema / yamllint "truthy").
is_a_test: true