Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

operators.cu 8.5 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
#include <stdio.h>
#include <assert.h>
#include <stdint.h>  // uint8_t, used by the int8 matmul kernels below
#include "ATen/ATen.h"
#include <cuda_fp16.h>
  5. #define MIN_VALUE (-1e38)
  6. typedef at::Half fp16;
  7. __half *cast(fp16 *ptr) {
  8. return reinterpret_cast<__half *>(ptr);
  9. }
  10. template <typename F>
  11. __global__ void kernel_wkv_forward(const int B, const int T, const int C,
  12. const float *__restrict__ const _w, const float *__restrict__ const _u, const F *__restrict__ const _k, const F *__restrict__ const _v,
  13. F *__restrict__ const _y, float *__restrict__ const _aa, float *__restrict__ const _bb, float *__restrict__ const _pp) {
  14. const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  15. const int _b = idx / C;
  16. const int _c = idx % C;
  17. const int _offset = _b * T * C + _c;
  18. const int _state_offset = _b * C + _c;
  19. float u = _u[_c];
  20. float w = _w[_c];
  21. const F *__restrict__ const k = _k + _offset;
  22. const F *__restrict__ const v = _v + _offset;
  23. F *__restrict__ const y = _y + _offset;
  24. float aa = _aa[_state_offset];
  25. float bb = _bb[_state_offset];
  26. float pp = _pp[_state_offset];
  27. for (int i = 0; i < T; i++) {
  28. const int ii = i * C;
  29. const float kk = float(k[ii]);
  30. const float vv = float(v[ii]);
  31. float ww = u + kk;
  32. float p = max(pp, ww);
  33. float e1 = exp(pp - p);
  34. float e2 = exp(ww - p);
  35. y[ii] = F((e1 * aa + e2 * vv) / (e1 * bb + e2));
  36. ww = w + pp;
  37. p = max(ww, kk);
  38. e1 = exp(ww - p);
  39. e2 = exp(kk - p);
  40. aa = e1 * aa + e2 * vv;
  41. bb = e1 * bb + e2;
  42. pp = p;
  43. }
  44. _aa[_state_offset] = aa;
  45. _bb[_state_offset] = bb;
  46. _pp[_state_offset] = pp;
  47. }
  48. template <typename F>
  49. void cuda_wkv_forward(int B, int T, int C, float *w, float *u, F *k, F *v, F *y, float *aa, float *bb, float *pp) {
  50. dim3 threadsPerBlock( min(C, 32) );
  51. assert(B * C % threadsPerBlock.x == 0);
  52. dim3 numBlocks(B * C / threadsPerBlock.x);
  53. kernel_wkv_forward<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, aa, bb, pp);
  54. }
  55. template void cuda_wkv_forward<fp16>(
  56. int B, int T, int C,
  57. float *w, float *u, fp16 *k, fp16 *v, fp16 *y,
  58. float *aa, float *bb, float *pp);
  59. template void cuda_wkv_forward<float>(
  60. int B, int T, int C,
  61. float *w, float *u, float *k, float *v, float *y,
  62. float *aa, float *bb, float *pp);
  63. __global__ void kernel_mm_seq_fp32i8(
  64. const int B, const int N, const int M,
  65. const float *__restrict__ const x, const int x_stride,
  66. const uint8_t *__restrict__ const w, const int w_stride,
  67. const float *__restrict__ const mx,
  68. const float *__restrict__ const rx,
  69. const float *__restrict__ const my,
  70. const float *__restrict__ const ry,
  71. float *__restrict__ const y, const int y_stride) {
  72. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  73. const int k = blockIdx.y * blockDim.y + threadIdx.y;
  74. if (i < B && k < M) {
  75. float y_local = 0;
  76. for (int j = 0; j < N; ++j) {
  77. y_local += x[i * x_stride + j] * (
  78. (float(w[j * w_stride + k]) + 0.5f)
  79. * rx[k] * ry[j] + mx[k] + my[j]
  80. );
  81. }
  82. y[i * y_stride + k] = y_local;
  83. }
  84. }
// Batched ("sequence") int8 matmul: y[B,M] = x[B,N] @ dequant(w[N,M]), where w
// is uint8 and (mx, rx, my, ry) are the dequantization offsets/scales indexed
// by output column and reduction row respectively. Specialized below for
// float and fp16 (at::Half).
template <typename F>
void cuda_mm8_seq(int B, int N, int M,
F *x, int x_stride,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
F *y, int y_stride);
  92. template <>
  93. void cuda_mm8_seq<float>(int B, int N, int M,
  94. float *x, int x_stride,
  95. uint8_t *w, int w_stride,
  96. float *mx, float *rx,
  97. float *my, float *ry,
  98. float *y, int y_stride) {
  99. dim3 blockSize(1, 128);
  100. dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
  101. kernel_mm_seq_fp32i8<<<gridSize, blockSize>>>(
  102. B, N, M, x, x_stride, w, w_stride,
  103. mx, rx, my, ry, y, y_stride);
  104. }
  105. __global__ void kernel_mm_seq_fp16i8(
  106. const int B, const int N, const int M,
  107. const __half *__restrict__ const x, const int x_stride,
  108. const uint8_t *__restrict__ const w, const int w_stride,
  109. const __half *__restrict__ const mx,
  110. const __half *__restrict__ const rx,
  111. const __half *__restrict__ const my,
  112. const __half *__restrict__ const ry,
  113. __half *__restrict__ const y, const int y_stride) {
  114. const int i = blockIdx.x * blockDim.x + threadIdx.x;
  115. const int k = blockIdx.y * blockDim.y + threadIdx.y;
  116. if (i < B && k < M) {
  117. float y_local = 0;
  118. for (int j = 0; j < N; ++j) {
  119. y_local += __half2float(x[i * x_stride + j]) * (
  120. (float(w[j * w_stride + k]) + 0.5f)
  121. * __half2float(rx[k]) * __half2float(ry[j])
  122. + __half2float(mx[k]) + __half2float(my[j])
  123. );
  124. }
  125. y[i * y_stride + k] = __float2half(y_local);
  126. }
  127. }
  128. template <>
  129. void cuda_mm8_seq<fp16>(int B, int N, int M,
  130. fp16 *x, int x_stride,
  131. uint8_t *w, int w_stride,
  132. fp16 *mx, fp16 *rx,
  133. fp16 *my, fp16 *ry,
  134. fp16 *y, int y_stride) {
  135. dim3 blockSize(1, 128);
  136. dim3 gridSize((B + blockSize.x - 1) / blockSize.x, (M + blockSize.y - 1) / blockSize.y);
  137. kernel_mm_seq_fp16i8<<<gridSize, blockSize>>>(
  138. B, N, M, cast(x), x_stride, w, w_stride,
  139. cast(mx), cast(rx), cast(my), cast(ry), cast(y), y_stride);
  140. }
  141. #define MM8_ONE_JSPLIT 24
  142. #define MM8_ONE_TILE 1024
  143. __global__ void kernel_mm_one_fp32i8(
  144. const int N, const int M,
  145. const float *__restrict__ const x,
  146. const uint8_t *__restrict__ const w, const int w_stride,
  147. const float *__restrict__ const mx,
  148. const float *__restrict__ const rx,
  149. const float *__restrict__ const my,
  150. const float *__restrict__ const ry,
  151. float *__restrict__ const y) {
  152. const int k = blockIdx.y * blockDim.y + threadIdx.y;
  153. const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  154. const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  155. if (k < M) {
  156. float y_local = 0;
  157. for (int j = j0; j < j1; ++j) {
  158. y_local += x[j] * (
  159. (float(w[j * w_stride + k]) + 0.5f)
  160. * rx[k] * ry[j] + mx[k] + my[j]
  161. );
  162. }
  163. atomicAdd(&y[k], y_local);
  164. }
  165. }
// Single-vector int8 matvec: y[M] += x[N] @ dequant(w[N,M]) with
// dequantization scales/offsets (rx, ry, mx, my). y is always float — the
// kernels accumulate partial sums into it with atomicAdd. Specialized below
// for float and fp16 (at::Half) inputs.
template <typename F>
void cuda_mm8_one(int N, int M,
F *x,
uint8_t *w, int w_stride,
F *mx, F *rx,
F *my, F *ry,
float *y);
  173. template <>
  174. void cuda_mm8_one<float>(int N, int M,
  175. float *x,
  176. uint8_t *w, int w_stride,
  177. float *mx, float *rx,
  178. float *my, float *ry,
  179. float *y) {
  180. dim3 blockSize(1, MM8_ONE_TILE);
  181. dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
  182. kernel_mm_one_fp32i8<<<gridSize, blockSize>>>(
  183. N, M, x, w, w_stride,
  184. mx, rx, my, ry, y);
  185. }
  186. __global__ void kernel_mm_one_fp16i8(
  187. const int N, const int M,
  188. const __half *__restrict__ const x,
  189. const uint8_t *__restrict__ const w, const int w_stride,
  190. const __half *__restrict__ const mx,
  191. const __half *__restrict__ const rx,
  192. const __half *__restrict__ const my,
  193. const __half *__restrict__ const ry,
  194. float *__restrict__ const y) {
  195. const int k = blockIdx.y * blockDim.y + threadIdx.y;
  196. const int j0 = min(N, blockIdx.x * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  197. const int j1 = min(N, (blockIdx.x + 1) * ((N + MM8_ONE_JSPLIT - 1) / MM8_ONE_JSPLIT));
  198. if (k < M) {
  199. float y_local = 0;
  200. for (int j = j0; j < j1; ++j) {
  201. y_local += __half2float(x[j]) * (
  202. (float(w[j * w_stride + k]) + 0.5f)
  203. * __half2float(rx[k]) * __half2float(ry[j])
  204. + __half2float(mx[k]) + __half2float(my[j])
  205. );
  206. }
  207. atomicAdd(&y[k], y_local);
  208. }
  209. }
  210. template <>
  211. void cuda_mm8_one<fp16>(int N, int M,
  212. fp16 *x,
  213. uint8_t *w, int w_stride,
  214. fp16 *mx, fp16 *rx,
  215. fp16 *my, fp16 *ry,
  216. float *y) {
  217. dim3 blockSize(1, MM8_ONE_TILE);
  218. dim3 gridSize(MM8_ONE_JSPLIT, (M + blockSize.y - 1) / blockSize.y);
  219. kernel_mm_one_fp16i8<<<gridSize, blockSize>>>(
  220. N, M, cast(x), w, w_stride,
  221. cast(mx), cast(rx), cast(my), cast(ry), y);
  222. }
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...