Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

transform.js 2.7 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
  1. /**
  2. * Example transform function for SageMaker endpoints
  3. * This file demonstrates how to transform prompts for SageMaker models
  4. */
  5. /**
  6. * Default export transformation function for SageMaker
  7. * This function will be used when importing this file without a specific function name
  8. *
  9. * @param {string|object} promptOrJson - The raw prompt text or JSON object from the response
  10. * @param {object} context - Contains configuration and variables
  11. * @returns {string|object} - Transformed prompt or processed response
  12. */
  13. module.exports = function (promptOrJson, context) {
  14. // Check if this is being used for prompt transformation (input is a string)
  15. if (typeof promptOrJson === 'string') {
  16. // Format for a JumpStart model by default
  17. return {
  18. inputs: promptOrJson,
  19. parameters: {
  20. max_new_tokens: context?.config?.maxTokens || 256,
  21. temperature: context?.config?.temperature || 0.7,
  22. top_p: context?.config?.topP || 0.9,
  23. do_sample: true,
  24. },
  25. };
  26. }
  27. // Otherwise, this is being used for response transformation (input is a JSON object)
  28. else {
  29. // Extract the generated text from the response
  30. const generatedText =
  31. promptOrJson.generated_text ||
  32. (Array.isArray(promptOrJson) && promptOrJson[0]?.generated_text) ||
  33. promptOrJson.text ||
  34. promptOrJson;
  35. // Return the extracted text with additional metadata
  36. return {
  37. output: typeof generatedText === 'string' ? generatedText : JSON.stringify(generatedText),
  38. source: 'SageMaker',
  39. model_type: context?.config?.modelType || 'custom',
  40. timestamp: new Date().toISOString(),
  41. };
  42. }
  43. };
  44. /**
  45. * Format a prompt for a JumpStart Llama model
  46. * @param {string} prompt The raw prompt text
  47. * @param {object} context Contains configuration details
  48. * @returns {object} Formatted payload for JumpStart Llama
  49. */
  50. module.exports.formatLlamaPayload = function (prompt, context) {
  51. return {
  52. inputs: prompt,
  53. parameters: {
  54. max_new_tokens: context?.config?.maxTokens || 256,
  55. temperature: context?.config?.temperature || 0.7,
  56. top_p: context?.config?.topP || 0.9,
  57. do_sample: true,
  58. },
  59. };
  60. };
  61. /**
  62. * Format a prompt for a Hugging Face Mistral model
  63. * @param {string} prompt The raw prompt text
  64. * @param {object} context Contains configuration details
  65. * @returns {object} Formatted payload for Hugging Face Mistral
  66. */
  67. module.exports.formatMistralPayload = function (prompt, context) {
  68. return {
  69. inputs: prompt,
  70. parameters: {
  71. max_new_tokens: context?.config?.maxTokens || 256,
  72. temperature: context?.config?.temperature || 0.7,
  73. top_p: context?.config?.topP || 0.9,
  74. do_sample: true,
  75. return_full_text: false,
  76. },
  77. };
  78. };
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...