Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

completion.test.ts 1.2 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
  1. import { disableCache, enableCache, fetchWithCache } from '../../../src/cache';
  2. import { OpenAiCompletionProvider } from '../../../src/providers/openai/completion';
  3. jest.mock('../../../src/cache');
  4. const mockFetchWithCache = jest.mocked(fetchWithCache);
  5. describe('OpenAI Provider', () => {
  6. beforeEach(() => {
  7. jest.resetAllMocks();
  8. disableCache();
  9. });
  10. afterEach(() => {
  11. enableCache();
  12. });
  13. describe('OpenAiCompletionProvider', () => {
  14. it('should call API successfully with text completion', async () => {
  15. const mockResponse = {
  16. data: {
  17. choices: [{ text: 'Test output' }],
  18. usage: { total_tokens: 10, prompt_tokens: 5, completion_tokens: 5 },
  19. },
  20. cached: false,
  21. status: 200,
  22. statusText: 'OK',
  23. severity: 'info',
  24. };
  25. mockFetchWithCache.mockResolvedValue(mockResponse);
  26. const provider = new OpenAiCompletionProvider('text-davinci-003');
  27. const result = await provider.callApi('Test prompt');
  28. expect(mockFetchWithCache).toHaveBeenCalledTimes(1);
  29. expect(result.output).toBe('Test output');
  30. expect(result.tokenUsage).toEqual({ total: 10, prompt: 5, completion: 5 });
  31. });
  32. });
  33. });
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...