test_ggml_integration.py

import ctypes
import functools
import logging
import sys
from ctypes import c_void_p
from pathlib import Path
from typing import Any, Iterator, Tuple

import fairseq2.nn
import fairseq2.nn.transformer
import numpy as np
import pytest
import torch

import ggml
from ctypes_utils import Ptr
from ggml import NativeObj
from ggml_convert import convert_model

Ctx = ggml.ggml_context_p

UNITY_MODELS = Path(__file__).parent / "examples/unity/models"
CTX_PARAMS = ggml.ggml_init_params(mem_size=16 * 1024 * 1024, mem_buffer=None)

@pytest.fixture(name="ctx")
def _ctx() -> Iterator[Ctx]:
    """Allocate a new context with 16 MB of memory"""
    try:
        ctx = ggml.ggml_init(params=CTX_PARAMS)
        yield ctx
    finally:
        ggml.ggml_free(ctx)

def test_ggml_bindings_work(ctx: Ctx) -> None:
    # Instantiate tensors
    x = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)
    b = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 1)

    # Use ggml operations to build a computational graph
    x2 = ggml.ggml_mul(ctx, x, x)
    f = ggml.ggml_add(ctx, ggml.ggml_mul(ctx, a, x2), b)
    gf = ggml.ggml_build_forward(f)

    # Set the input values
    ggml.ggml_set_f32(x, 2.0)
    ggml.ggml_set_f32(a, 3.0)
    ggml.ggml_set_f32(b, 4.0)

    # Compute the graph
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    # Get the output value
    output = ggml.ggml_get_f32_1d(f, 0)
    assert output == 16.0
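
# For reference: with x = 2, a = 3 and b = 4 the graph above evaluates
# f = a * x**2 + b = 3 * 4 + 4 = 16, which is the value checked by the assert.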

def test_ggml_matmul(ctx: Ctx) -> None:
    # Instantiate tensors
    a = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 4, 2)
    x = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 4, 3)

    # Use ggml operations to build a computational graph
    y = ggml.ggml_mul_mat(ctx, a, x)
    assert ggml.shape(y) == (3, 2)
    gf = ggml.ggml_build_forward(y)

    # Set the input values
    ggml.ggml_set_f32(x, 0.0)
    for i in range(4 * 3):
        ggml.ggml_set_f32_1d(x, i, i)

    ggml.ggml_set_f32(a, 0.0)
    ggml.ggml_set_f32_1d(a, 1, 1.0)
    ggml.ggml_set_f32_1d(a, 7, 1.0)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    output = [[ggml.ggml_get_f32_1d(y, j * 2 + i) for j in range(3)] for i in range(2)]
    assert output == [[1, 5, 9], [3, 7, 11]]
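
# Worked example for the matmul test above: in numpy terms `a` has shape (2, 4)
# with ones at flat indices 1 and 7, i.e. a = [[0, 1, 0, 0], [0, 0, 0, 1]], and
# `x` has shape (3, 4) filled with arange(12). ggml_mul_mat(a, x) computes x @ a.T,
# which selects columns 1 and 3 of x, giving [[1, 3], [5, 7], [9, 11]]. Reading the
# result with ggml_get_f32_1d(y, j * 2 + i) transposes it to [[1, 5, 9], [3, 7, 11]].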

def test_shape_works(ctx: Ctx) -> None:
    """GGML shape order convention is the reverse from numpy"""
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.shape(a) == (10,)

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    assert ggml.shape(b) == (21, 11)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.shape(c) == (32, 22, 12)
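
# ggml stores its `ne` dimensions with ne[0] as the fastest-varying (contiguous)
# axis, so `ggml.shape` reverses them to present the familiar row-major numpy
# ordering: a tensor created as ggml_new_tensor_2d(ctx, type, 11, 21) reports (21, 11).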

def test_nb_works(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.nb(a) == (4, 40, 40, 40)

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F16, 11, 21)
    assert ggml.nb(b) == (2, 22, 462, 462)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.nb(c) == (4, 48, 1056, 33792)
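
# `nb` is ggml's per-dimension stride in bytes. For the f32 vector of 10 elements
# nb = (4, 40, 40, 40): 4 bytes per element, 40 bytes for the whole row, and the
# unused higher dims repeat the total size. For the f16 (21, 11) tensor the element
# is 2 bytes, a row of 11 elements is 22 bytes and the full tensor is 21 * 22 = 462
# bytes, hence (2, 22, 462, 462).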

def test_strides_works(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    assert ggml.strides(a) == np.ones((10,), dtype=np.float32).strides

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    assert ggml.strides(b) == np.ones((21, 11), dtype=np.float32).strides

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    assert ggml.strides(c) == np.ones((32, 22, 12), dtype=np.float32).strides

def test_to_numpy_works_with_f32(ctx: Ctx) -> None:
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10)
    na = ggml.to_numpy(a)
    for i in range(10):
        ggml.ggml_set_f32_1d(a, i, i)
    assert na[5] == 5
    assert np.allclose(na, np.array(range(10), dtype=np.float32))
    ggml.ggml_set_f32_1d(a, 5, -1.5)
    assert na[5] == -1.5

    # Note: GGML order of dims is reversed wrt numpy shapes
    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 11, 21)
    for i in range(11 * 21):
        ggml.ggml_set_f32_1d(b, i, i)
    nb = ggml.to_numpy(b)
    # assert nb.shape == (21, 11)
    assert nb[0, 5] == 5
    assert nb[3, 5] == 11 * 3 + 5
    assert np.allclose(
        nb, np.array(range(11 * 21), dtype=np.float32).reshape(ggml.shape(b))
    )
    ggml.ggml_set_f32_1d(b, 11 * 3 + 5, -1.5)
    assert nb[3, 5] == -1.5

    sum_rows = ggml.ggml_sum_rows(ctx, b)
    gf = ggml.ggml_build_forward(sum_rows)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)
    np_sum_rows = np.sum(nb, axis=-1, keepdims=True)
    assert np_sum_rows.shape == ggml.shape(sum_rows)
    for i in range(11):
        assert np_sum_rows[i] == ggml.ggml_get_f32_1d(sum_rows, i)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, 12, 22, 32)
    for i in range(12 * 22 * 32):
        ggml.ggml_set_f32_1d(c, i, i)
    nc = ggml.to_numpy(c)
    assert ggml.shape(c) == (32, 22, 12)
    assert nc[3, 5, 11] == 22 * 12 * 3 + 12 * 5 + 11
    assert np.allclose(
        nc, np.array(range(12 * 22 * 32), dtype=np.float32).reshape(ggml.shape(c))
    )
    ggml.ggml_set_f32_1d(c, 22 * 12 * 3 + 12 * 5 + 11, -1.5)
    assert nc[3, 5, 11] == -1.5
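
# As the test above demonstrates, `to_numpy` returns a view over the ggml buffer
# rather than a copy: writes made through ggml_set_f32_1d after the conversion
# (e.g. setting index 5 to -1.5) are immediately visible in the numpy array.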

def test_from_numpy_works_with_f32(ctx: Ctx) -> None:
    a = np.random.normal(size=(10,)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (10,)
    assert ggml.nb(ga) == ggml.nb(ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F32, 10))
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(11, 21)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (11, 21)
    assert ggml.nb(ga) == ggml.nb(
        ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, *a.shape[::-1])
    )
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(12, 22, 32)).astype(dtype=np.float32)
    ga = ggml.from_numpy(ctx, a)
    assert ggml.shape(ga) == (12, 22, 32)
    assert ggml.nb(ga) == ggml.nb(
        ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F32, *a.shape[::-1])
    )
    assert np.allclose(a, ggml.to_numpy(ga))

def test_to_numpy_works_with_f16(ctx: Ctx) -> None:
    # We explicitly fill the tensors, otherwise they might have non-zero values in them.
    a = ggml.ggml_new_tensor_1d(ctx, ggml.GGML_TYPE_F16, 10)
    na = ggml.to_numpy(a)
    ggml.ggml_set_f32(a, 2.14)
    assert np.allclose(na, np.ones((10,), dtype=np.float16) * 2.14)
    ggml.ggml_set_f32(a, 4.28)
    assert np.allclose(na, np.ones((10,), dtype=np.float16) * 4.28)

    b = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F16, 11, 21)
    nb = ggml.to_numpy(b)
    ggml.ggml_set_f32(b, 4.18)
    assert np.allclose(nb, np.ones((21, 11), dtype=np.float16) * 4.18)
    ggml.ggml_set_f32(b, 5.12)
    assert np.allclose(nb, np.ones((21, 11), dtype=np.float16) * 5.12)

    c = ggml.ggml_new_tensor_3d(ctx, ggml.GGML_TYPE_F16, 12, 22, 32)
    nc = ggml.to_numpy(c)
    ggml.ggml_set_f32(c, 3.16)
    assert np.allclose(nc, np.ones((32, 22, 12), dtype=np.float16) * 3.16)
    ggml.ggml_set_f32(c, 5.08)
    assert np.allclose(nc, np.ones((32, 22, 12), dtype=np.float16) * 5.08)

def test_from_numpy_works_with_f16(ctx: Ctx) -> None:
    a = np.random.normal(size=(10,)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(11, 21)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))

    a = np.random.normal(size=(12, 22, 32)).astype(dtype=np.float16)
    ga = ggml.from_numpy(ctx, a)
    assert np.allclose(a, ggml.to_numpy(ga))

def test_to_numpy_works_with_transposed(ctx: Ctx) -> None:
    ga = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 10, 5)
    a = ggml.to_numpy(ga)
    a[...] = np.arange(50).reshape(5, 10).astype(dtype=np.float32)

    gat = ggml.ggml_transpose(ctx, ga)
    at = ggml.to_numpy(gat)
    assert np.allclose(a.T, at)
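
# ggml_transpose only swaps the tensor's ne/nb metadata, so the result is a
# non-contiguous view of the same data; presumably `to_numpy` mirrors this by
# returning a numpy view with swapped strides, which is why no copy is needed
# for the comparison against a.T.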

def test_ggml_slice(ctx: Ctx) -> None:
    ga = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 10, 5)
    a = ggml.to_numpy(ga)
    a[...] = np.arange(50).reshape(5, 10).astype(dtype=np.float32)

    gs0 = ggml.ggml_slice(ctx, ga, 0, 3, 7)
    s0 = ggml.to_numpy(gs0)
    assert np.allclose(a[:, 3:7], s0)

    gs1 = ggml.ggml_slice(ctx, ga, 1, 2, 5)
    s1 = ggml.to_numpy(gs1)
    assert np.allclose(a[2:5, :], s1)
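
# Note on axis numbering: the axis argument of ggml_slice follows ggml's reversed
# dim order, so axis 0 is the innermost (last numpy) dimension. Slicing axis 0
# over [3, 7) therefore corresponds to a[:, 3:7], and axis 1 over [2, 5) to a[2:5, :].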

@pytest.mark.xfail(reason="to_numpy not implemented")
def test_ggml_transpose_and_slice(ctx: Ctx) -> None:
    ga = ggml.ggml_new_tensor_2d(ctx, ggml.GGML_TYPE_F32, 10, 5)
    a = ggml.to_numpy(ga)
    a[...] = np.arange(50).reshape(5, 10).astype(dtype=np.float32)

    gat = ggml.ggml_transpose(ctx, ga)

    gs0 = ggml.ggml_slice(ctx, gat, 0, 2, 5)
    s0 = ggml.to_numpy(gs0)
    assert np.allclose(a.T[:, 2:5], s0)

    gs1 = ggml.ggml_slice(ctx, gat, 1, 3, 7)
    s1 = ggml.to_numpy(gs1)
    assert np.allclose(a.T[3:7, :], s1)

def test_numpy_mul_mat(ctx: Ctx) -> None:
    slen, d_in, d_out = (5, 4, 2)
    # torch.nn and fairseq2.nn assume (seq_len, dim) inputs.
    x = np.zeros((slen, d_in), dtype=np.float32)  # (seq_len, dim_in)
    x[0, :] = [1, 1 / 3, 0, 0]

    weight = np.eye(d_out, d_in, dtype=np.float32)
    weight[1, 1] = 1
    # assert weight.shape == (d_out, d_in)  # (dim_out, dim_in)
    y_exp = x @ weight.T  # (seq_len, dim_out)

    gx = ggml.from_numpy(ctx, x)  # (dim_in, seq_len)
    gw = ggml.from_numpy(ctx, weight)  # (dim_in, dim_out)
    # gb = ggml.from_numpy(ctx, linear.bias.numpy())  # (dim_out)
    # GGML linear impl
    assert ggml.ggml_can_mul_mat(gw, gx)
    # gy = ggml.ggml_add(ctx, ggml.ggml_mul_mat(ctx, gw, gx), gb)  # (dim_out, seq_len)
    gy = ggml.ggml_mul_mat(ctx, gw, gx)  # (dim_out, seq_len)
    ggml.build_and_compute(ctx, gy)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y)
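
# The convention exercised above mirrors torch.nn.functional.linear (without bias):
# with the weight stored as (dim_out, dim_in), ggml_mul_mat(gw, gx) yields x @ weight.T,
# i.e. a (seq_len, dim_out) result once mapped back to numpy order.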

@pytest.mark.parametrize("ndim", [2, 3, 4])
def test_flatten(ctx: Ctx, ndim: int) -> None:
    shape = [11, 7, 5, 3][:ndim]  # Prime numbers to avoid surprises
    numel = functools.reduce(lambda a, b: a * b, shape, 1)
    x = torch.arange(numel, dtype=torch.float32).reshape(shape)
    for torch_dim in range(ndim - 1):
        ggml_dim = ndim - 1 - torch_dim
        n = x.shape[torch_dim + 1]

        gx = ggml.from_numpy(ctx, x)
        gx1 = ggml.ggml_flatten_1d(ctx, gx, ggml_dim - 1)
        gy = ggml.ggml_unflatten_1d(ctx, gx1, ggml_dim - 1, n)

        x1 = x.flatten(torch_dim, torch_dim + 1)
        y = x1.unflatten(torch_dim, (-1, n))
        assert y.shape == x.shape
        assert np.allclose(y.numpy(), x.numpy())
        assert x1.shape == ggml.shape(gx1)
        assert np.allclose(x1.numpy(), ggml.to_numpy(gx1))
        assert y.shape == ggml.shape(gy)
        assert np.allclose(y.numpy(), ggml.to_numpy(gy))
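
# Dim bookkeeping for the test above: because ggml enumerates dimensions in the
# reverse order from torch, torch dim `d` maps to ggml dim `ndim - 1 - d`.
# Flattening the torch pair (d, d + 1) is therefore expressed as
# ggml_flatten_1d(ctx, gx, ggml_dim - 1), and ggml_unflatten_1d with the same
# index and `n` is expected to restore the original shape.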

@torch.no_grad()
def test_torch_spda_vs_ggml_flash_attn(ctx: Ctx) -> None:
    slen, d_in, num_heads = (5, 4, 2)
    torch.random.manual_seed(0)
    q = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(q, -1, 1)
    k = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(k, -1, 1)
    v = torch.zeros((num_heads, slen, d_in))
    torch.nn.init.uniform_(v, -1, 1)

    # Note: is_causal=True applies a causal mask, so position 0 only attends to itself.
    y_exp = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
    y_exp = y_exp.numpy()

    gq = ggml.from_numpy(ctx, q.numpy())
    gk = ggml.from_numpy(ctx, k.numpy())
    # ggml flash attention expects a different order of axes for v:
    # (H, slen, H_dim) -> (H, H_dim, slen)
    gv = ggml.from_numpy(ctx, v.transpose(1, 2).contiguous().numpy())
    assert ggml.shape(gv) == (num_heads, d_in, slen)

    gy = ggml.ggml_flash_attn(ctx, gq, gk, gv, True)
    gf = ggml.ggml_build_forward(gy)
    ggml.ggml_graph_compute_with_ctx(ctx, ctypes.pointer(gf), 1)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y)

@pytest.mark.parametrize("shape", [(5, 8, 4), (2, 5, 8, 4)])
def test_ggml_softmax_vs_torch(ctx: Ctx, shape: Tuple[int, ...]) -> None:
    x = torch.empty(shape)
    torch.nn.init.uniform_(x, -1, 1)
    y_exp = torch.softmax(x, dim=-1).numpy()

    gx = ggml.from_numpy(ctx, x.numpy())
    gy = ggml.ggml_soft_max(ctx, gx)
    ggml.build_and_compute(ctx, gy)

    y = ggml.to_numpy(gy)
    assert np.allclose(y_exp, y, rtol=1e-3)
    assert np.allclose(np.argmax(y_exp, axis=-1), np.argmax(y, axis=-1))

def test_can_return_hypothesis_ptr(ctx: Ctx) -> None:
    hyp_ptr = ggml._testing_return_hypothesis_ptr(ctx)

    hyp0, hyp1 = hyp_ptr[0], hyp_ptr[1]
    assert ggml.to_numpy(hyp0.seq).tolist() == [314]
    assert hyp0.score == pytest.approx(3.14)

    assert ggml.to_numpy(hyp1.seq).tolist() == [421]
    assert hyp1.score == pytest.approx(4.21)

@pytest.mark.parametrize("inplace", ["", "inplace"])
def test_set_2d(ctx: Ctx, inplace: bool):
    a = torch.empty((5, 3, 2))
    torch.nn.init.uniform_(a, -1, 1)
    b = torch.empty((3, 2))
    torch.nn.init.uniform_(b, -1, 1)

    a_original = a.clone()
    # make a copy of `a` before we modify it
    ga = ggml.from_numpy(ctx, a.clone().numpy())
    gb = ggml.from_numpy(ctx, b.numpy())
    a[3, ...] = b

    set_2d = ggml.ggml_set_2d_inplace if inplace else ggml.ggml_set_2d
    ga_updated = set_2d(ctx, ga, gb, ggml.nb(ga)[1], ggml.nb(ga)[2] * 3)
    ggml.build_and_compute(ctx, ga_updated)

    a_updated = ggml.to_numpy(ga if inplace else ga_updated)
    assert np.allclose(a.numpy(), a_updated)

    if not inplace:
        # When not using set_2d_inplace, the original tensor is unmodified.
        assert np.allclose(ggml.to_numpy(ga), a_original.numpy())
        assert ga.contents.data != ga_updated.contents.data
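
# Offset arithmetic for ggml_set_2d above: nb(ga)[2] * 3 is the byte offset of the
# slice a[3, ...] inside the (5, 3, 2) tensor, and nb(ga)[1] is the row stride of
# the (3, 2) sub-tensor being written, so the call copies `b` into that slice,
# matching the numpy-side assignment a[3, ...] = b.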