Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

providers.test.ts 3.2 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
  1. import fetch from 'node-fetch';
  2. import { OpenAiCompletionProvider, OpenAiChatCompletionProvider } from '../src/providers/openai';
  3. import { disableCache, enableCache } from '../src/cache.js';
  4. import { loadApiProvider } from '../src/providers.js';
  5. jest.mock('node-fetch', () => jest.fn());
  6. jest.mock('../src/esm.js');
  7. describe('providers', () => {
  8. afterEach(() => {
  9. jest.clearAllMocks();
  10. });
  11. test('OpenAiCompletionProvider callApi', async () => {
  12. const mockResponse = {
  13. json: jest.fn().mockResolvedValue({
  14. choices: [{ text: 'Test output' }],
  15. usage: { total_tokens: 10, prompt_tokens: 5, completion_tokens: 5 },
  16. }),
  17. };
  18. (fetch as unknown as jest.Mock).mockResolvedValue(mockResponse);
  19. const provider = new OpenAiCompletionProvider('text-davinci-003', 'test-api-key');
  20. const result = await provider.callApi('Test prompt');
  21. expect(fetch).toHaveBeenCalledTimes(1);
  22. expect(result.output).toBe('Test output');
  23. expect(result.tokenUsage).toEqual({ total: 10, prompt: 5, completion: 5 });
  24. });
  25. test('OpenAiChatCompletionProvider callApi', async () => {
  26. const mockResponse = {
  27. json: jest.fn().mockResolvedValue({
  28. choices: [{ message: { content: 'Test output' } }],
  29. usage: { total_tokens: 10, prompt_tokens: 5, completion_tokens: 5 },
  30. }),
  31. };
  32. (fetch as unknown as jest.Mock).mockResolvedValue(mockResponse);
  33. const provider = new OpenAiChatCompletionProvider('gpt-3.5-turbo', 'test-api-key');
  34. const result = await provider.callApi('Test prompt');
  35. expect(fetch).toHaveBeenCalledTimes(1);
  36. expect(result.output).toBe('Test output');
  37. expect(result.tokenUsage).toEqual({ total: 10, prompt: 5, completion: 5 });
  38. });
  39. test('OpenAiChatCompletionProvider callApi with cache disabled', async () => {
  40. disableCache();
  41. const mockResponse = {
  42. json: jest.fn().mockResolvedValue({
  43. choices: [{ message: { content: 'Test output' } }],
  44. usage: { total_tokens: 10, prompt_tokens: 5, completion_tokens: 5 },
  45. }),
  46. };
  47. (fetch as unknown as jest.Mock).mockResolvedValue(mockResponse);
  48. const provider = new OpenAiChatCompletionProvider('gpt-3.5-turbo', 'test-api-key');
  49. const result = await provider.callApi('Test prompt');
  50. expect(fetch).toHaveBeenCalledTimes(1);
  51. expect(result.output).toBe('Test output');
  52. expect(result.tokenUsage).toEqual({ total: 10, prompt: 5, completion: 5 });
  53. enableCache();
  54. });
  55. test('loadApiProvider with openai:chat', async () => {
  56. const provider = await loadApiProvider('openai:chat');
  57. expect(provider).toBeInstanceOf(OpenAiChatCompletionProvider);
  58. });
  59. test('loadApiProvider with openai:completion', async () => {
  60. const provider = await loadApiProvider('openai:completion');
  61. expect(provider).toBeInstanceOf(OpenAiCompletionProvider);
  62. });
  63. test('loadApiProvider with openai:chat:modelName', async () => {
  64. const provider = await loadApiProvider('openai:chat:gpt-3.5-turbo');
  65. expect(provider).toBeInstanceOf(OpenAiChatCompletionProvider);
  66. });
  67. test('loadApiProvider with openai:completion:modelName', async () => {
  68. const provider = await loadApiProvider('openai:completion:text-davinci-003');
  69. expect(provider).toBeInstanceOf(OpenAiCompletionProvider);
  70. });
  71. });
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...