# demo_meta_codepy.py (forked from inducer/pycuda)
import pycuda.driver as cuda
import pycuda.autoinit
import numpy
import numpy.linalg as la
from pycuda.compiler import SourceModule

# Each thread adds thread_strides elements, spaced block_size apart, so one
# block of block_size threads covers block_size*thread_strides elements.
thread_strides = 16
block_size = 256
macroblock_count = 33

total_size = thread_strides*block_size*macroblock_count
dtype = numpy.float32

a = numpy.random.randn(total_size).astype(dtype)
b = numpy.random.randn(total_size).astype(dtype)

# Copy the operands to the GPU and allocate space for the result.
a_gpu = cuda.to_device(a)
b_gpu = cuda.to_device(b)
c_gpu = cuda.mem_alloc(a.nbytes)

from cgen import FunctionBody, \
        FunctionDeclaration, POD, Value, \
        Pointer, Module, Block, Initializer, Assign
from cgen.cuda import CudaGlobal

# Build the kernel as a cgen syntax tree: a __global__ function "add" whose
# body is one Initializer for idx plus an unrolled Assign per stride.
mod = Module([
    FunctionBody(
        CudaGlobal(FunctionDeclaration(
            Value("void", "add"),
            arg_decls=[Pointer(POD(dtype, name))
                       for name in ["tgt", "op1", "op2"]])),
        Block([
            Initializer(
                POD(numpy.int32, "idx"),
                "threadIdx.x + %d*blockIdx.x"
                % (block_size*thread_strides)),
            ]+[
            Assign(
                "tgt[idx+%d]" % (o*block_size),
                "op1[idx+%d] + op2[idx+%d]" % (
                    o*block_size,
                    o*block_size))
            for o in range(thread_strides)]))])
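
# For reference, the tree above should expand to roughly this CUDA C (a sketch,
# modulo cgen's exact formatting; 4096 == block_size*thread_strides):
#
#   __global__ void add(float *tgt, float *op1, float *op2)
#   {
#     int idx = threadIdx.x + 4096*blockIdx.x;
#     tgt[idx+0] = op1[idx+0] + op2[idx+0];
#     tgt[idx+256] = op1[idx+256] + op2[idx+256];
#     ...
#     tgt[idx+3840] = op1[idx+3840] + op2[idx+3840];
#   }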

# Hand the generated source to nvcc via SourceModule and fetch the kernel.
mod = SourceModule(mod)

func = mod.get_function("add")
func(c_gpu, a_gpu, b_gpu,
     block=(block_size, 1, 1),
     grid=(macroblock_count, 1))

c = cuda.from_device_like(c_gpu, a)

# GPU and CPU perform the same single-precision additions, so the match is exact.
assert la.norm(c-(a+b)) == 0
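
# To inspect the generated kernel source yourself, print the cgen module before
# it is rebound to the compiled SourceModule above (this assumes cgen's usual
# str() rendering of a Module):
#
#     print(str(mod))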