agents.py

import json
from typing import Optional

from openai import OpenAI

from lib.configs import OPENAI_API_KEY
from lib.types import Evaluation, Question

# Export all public agent classes, not just the base class.
__all__ = [
    "BaseAgent",
    "OpenAIQuestionGeneratorAgent",
    "OpenAIResponseEvaluationAgent",
]


class BaseAgent:
    """Minimal agent interface: calling an instance delegates to run()."""

    def __init__(self):
        self.client = None

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, *args, **kwargs):
        raise NotImplementedError
class OpenAIQuestionGeneratorAgent(BaseAgent):
    """Generates interview questions from a candidate's description via the OpenAI API."""

    def __init__(self):
        super().__init__()
        self.client = OpenAI(api_key=OPENAI_API_KEY)
        self.system_prompt = """You are a non-technical interviewer that interviews \
across the following categories:
- personal
- role-specific
- behavioural
- situational
You will be provided with a candidate's description.
Generate {n_questions} questions, ensuring that there is a question for each category \
and that the questions are based on the candidate's description.
* You answer strictly as a list of JSON objects. Don't include any other verbose text, \
and don't include markdown syntax anywhere.
JSON format:
[
{{"question": "<personal_question>", "type": "personal"}},
{{"question": "<role_specific_question>", "type": "role-specific"}},
{{"question": "<behavioural_question>", "type": "behavioural"}},
{{"question": "<situational_question>", "type": "situational"}},
...more questions to make up {n_questions} questions
]"""
        self.user_prompt = "Candidate Description:\n{description}"

    def run(self, description: str, n_questions: int = 4) -> Optional[list[Question]]:
        """
        Generate interview questions based on the given description.

        Args:
            description (str): The description used as input for question generation.
            n_questions (int, optional): The number of questions to generate. Defaults to 4.

        Returns:
            Optional[list[Question]]: A list of generated interview questions, or None if an error occurs.
        """
        return self._generate(description, n_questions)

    def _generate(self, description: str, n_questions: int) -> Optional[list[Question]]:
        """Call the chat completions API and parse the model's JSON list of questions."""
        try:
            # Ensure that there are at least 4 questions (one per category).
            if n_questions < 4:
                n_questions = 4
            output = self.client.chat.completions.create(
                model="gpt-3.5-turbo-1106",
                messages=[
                    {
                        "role": "system",
                        "content": self.system_prompt.format(n_questions=n_questions),
                    },
                    {
                        "role": "user",
                        "content": self.user_prompt.format(description=description),
                    },
                ],
                temperature=0.5,
                max_tokens=1024,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
            )
            questions = json.loads(output.choices[0].message.content or "[]")
            return questions
        except Exception:
            # API errors or malformed JSON are swallowed; callers get None.
            return None
class OpenAIResponseEvaluationAgent(BaseAgent):
    """Evaluates a candidate's response to an interview question via the OpenAI API."""

    def __init__(self):
        super().__init__()
        self.client = OpenAI(api_key=OPENAI_API_KEY)
        self.system_prompt = """You are an interviewer evaluating a candidate's response \
to an interview question. Your task is to:
- Evaluate the candidate's response on the scale of "good", "average", and "bad".
- Provide a reason for why it's categorized as good, average, or bad.
- Offer constructive feedback or suggestions for improvement.
- Provide 2 samples of good responses.
You will be provided with an interview question and a candidate response.
Evaluate and provide output in the following JSON format:
{
    "evaluation": "good, average, or bad",
    "reason": "Reason why it's good, average, or bad",
    "feedback": "Feedback or suggestions for improvement",
    "samples": [
        "<Good response 1>",
        "<Good response 2>"
    ]
}"""
        self.user_prompt = """QUESTION:
{question}
RESPONSE:
{response}"""

    def run(self, question: str, response: str) -> Optional[Evaluation]:
        """
        Evaluate a candidate's response to an interview question.

        Args:
            question (str): The interview question.
            response (str): The candidate's response.

        Returns:
            Optional[Evaluation]: The evaluation of the candidate's response, or None if an error occurred.
        """
        return self._generate(question, response)

    def _generate(self, question: str, response: str) -> Optional[Evaluation]:
        """Call the chat completions API and parse the model's JSON evaluation object."""
        try:
            output = self.client.chat.completions.create(
                model="gpt-3.5-turbo-1106",
                messages=[
                    {
                        "role": "system",
                        "content": self.system_prompt,
                    },
                    {
                        "role": "user",
                        "content": self.user_prompt.format(question=question, response=response),
                    },
                ],
                temperature=0.5,
                max_tokens=1024,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0,
            )
            evaluation = json.loads(output.choices[0].message.content or "{}")
            return evaluation
        except Exception:
            # API errors or malformed JSON are swallowed; callers get None.
            return None
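

# --- Usage sketch (not part of the original module) ---
# A minimal example of how these agents might be exercised end to end.
# It assumes lib.configs.OPENAI_API_KEY holds a valid key and that the
# parsed JSON matches the shapes described in the prompts above; the
# candidate description and response below are illustrative only.
if __name__ == "__main__":
    generator = OpenAIQuestionGeneratorAgent()
    evaluator = OpenAIResponseEvaluationAgent()

    questions = generator(
        description="Backend engineer with 5 years of Python and AWS experience.",
        n_questions=4,
    )
    if questions:
        for q in questions:
            print(f"[{q['type']}] {q['question']}")

        # Evaluate a hypothetical candidate response to the first question.
        evaluation = evaluator(
            question=questions[0]["question"],
            response="I led the migration of our billing service to AWS Lambda.",
        )
        print(evaluation)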