Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

vercelAiSdkExample.js 2.3 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
  1. // Example from @Codeshark-NET https://github.com/promptfoo/promptfoo/issues/922
  2. // @ts-check
  3. import { anthropic } from '@ai-sdk/anthropic';
  4. import { generateObject } from 'ai';
  5. import promptfoo from 'promptfoo';
  6. import { promptSchema } from './schemaValidation.mjs';
  7. class CustomProvider {
  8. constructor(options) {
  9. // Provider ID can be overridden by the config file (e.g. when using multiple of the same provider)
  10. this.providerId = options.id || 'custom provider';
  11. // options.config contains any custom options passed to the provider
  12. this.config = options.config;
  13. }
  14. id() {
  15. return this.providerId;
  16. }
  17. async callApi(prompt, context) {
  18. const cache = await promptfoo.default.cache.getCache();
  19. // Create a unique cache key based on the prompt and context
  20. const cacheKey = `api:${this.providerId}:${prompt}}`; // :${JSON.stringify(context)
  21. // Check if the response is already cached
  22. const cachedResponse = await cache.get(cacheKey);
  23. if (cachedResponse) {
  24. return {
  25. // Required
  26. output: JSON.parse(cachedResponse),
  27. // Optional
  28. tokenUsage: {
  29. total: 0, // No tokens used because it's from the cache
  30. prompt: 0,
  31. completion: 0,
  32. },
  33. cost: 0, // No cost because it's from the cache
  34. };
  35. }
  36. // If not cached, make the function call
  37. const model = anthropic('claude-3-5-haiku-20241022');
  38. const { object, usage } = await generateObject({
  39. model,
  40. messages: JSON.parse(prompt),
  41. maxTokens: 4096,
  42. temperature: 0.4,
  43. maxRetries: 0,
  44. schema: promptSchema,
  45. mode: 'tool',
  46. });
  47. const inputCost = 0.00025 / 1000; //config.cost ?? model.cost.input;
  48. const outputCost = 0.00125 / 1000; // config.cost ?? model.cost.output;
  49. const totalCost =
  50. inputCost * usage.promptTokens + outputCost * usage.completionTokens || undefined;
  51. // Store the response in the cache
  52. try {
  53. await cache.set(cacheKey, JSON.stringify(object));
  54. } catch (error) {
  55. console.error('Failed to store response in cache:', error);
  56. }
  57. return {
  58. // Required
  59. output: object,
  60. // Optional
  61. tokenUsage: {
  62. total: usage.totalTokens,
  63. prompt: usage.promptTokens,
  64. completion: usage.completionTokens,
  65. },
  66. cost: totalCost,
  67. };
  68. }
  69. }
  70. export default CustomProvider;
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...