// Python Bindings for KMeans_CUDA
#include "KMeans_CUDA.h"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
namespace py = pybind11;
// Module-level state: the model and the host-side copy of the data
KMeans_CUDA* model = nullptr;
float* h_data = nullptr;
// Copies the data to host memory as float; the KMeans_CUDA constructor moves it to the GPU.
// FIXME -- Find a better way to pass and store the data.
void KMeans_CUDA_constructor(py::array_t<double> data, int N, int D, int K) {
    // The input is N points of dimension D, so the host buffer needs N * D floats.
    h_data = new float[N * D];
    auto buffer = data.request();
    double* ptr = static_cast<double*>(buffer.ptr);
    for (ssize_t i = 0; i < buffer.size; i++) {
        h_data[i] = static_cast<float>(ptr[i]);
    }
    model = new KMeans_CUDA(h_data, N, D, K);
}
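
// One possible answer to the FIXME above (a sketch, not part of the original module):
// let pybind11 coerce the incoming NumPy array to contiguous float32 via
// py::array::forcecast, which removes the manual double -> float loop. The function
// name below is hypothetical and is not registered in the module; it only
// illustrates the technique.
void KMeans_CUDA_constructor_float(
        py::array_t<float, py::array::c_style | py::array::forcecast> data,
        int N, int D, int K) {
    auto buffer = data.request();
    float* ptr = static_cast<float*>(buffer.ptr);   // already float32 and contiguous
    h_data = new float[N * D];
    for (ssize_t i = 0; i < buffer.size; i++) {
        h_data[i] = ptr[i];
    }
    model = new KMeans_CUDA(h_data, N, D, K);
}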
// Run one epoch
void one_epoch() {
    model->one_epoch();
}

// Runs `epochs` number of iterations
void fit(int epochs) {
    model->fit(epochs);
}

// Compute error
float compute_error() {
    return model->compute_error();
}
// Predictions for the current data with the current clusters.
// With no base object supplied, the py::array_t constructor below copies the
// vector's contents, so the returned NumPy array owns its own memory.
py::array_t<int> predictions() {
    std::vector<int> predictions = model->predictions();
    return py::array_t<int>(
        {predictions.size()},   // shape: one label per data point
        {sizeof(int)},          // stride in bytes
        predictions.data()
    );
}
// Deletes CPU memory. GPU memory is freed in the KMeans_CUDA destructor.
// FIXME -- is there a way to call the destructor without having the user do so in Python?
void release_memory() {
    delete model;
    delete[] h_data;
    model = nullptr;   // avoid double-free if called twice
    h_data = nullptr;
}
// Binding functions
PYBIND11_MODULE(KMeans_CUDA, m) {
    m.def("KMeans_CUDA_constructor", &KMeans_CUDA_constructor);
    m.def("one_epoch", &one_epoch);
    m.def("fit", &fit);
    m.def("compute_error", &compute_error);
    m.def("predictions", &predictions);
    m.def("release_memory", &release_memory);
}
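
// A possible answer to the destructor FIXME above: a sketch of an alternative
// binding, shown commented out because a translation unit can only define one
// PYBIND11_MODULE. It wraps the model in a small owner struct and exposes it with
// py::class_, so pybind11 calls the C++ destructor when the Python object is
// garbage-collected; no explicit release_memory() and no module-level globals are
// needed. The wrapper name "KMeansWrapper" is hypothetical; the KMeans_CUDA member
// functions are the same ones used above, and <memory> plus <pybind11/stl.h> would
// be required for std::unique_ptr and the std::vector<int> return value.
//
// struct KMeansWrapper {
//     std::vector<float> host;                // owns the host-side copy of the data
//     std::unique_ptr<KMeans_CUDA> model;     // owns the GPU-side state
//
//     KMeansWrapper(py::array_t<double> data, int N, int D, int K) {
//         auto buffer = data.request();
//         double* ptr = static_cast<double*>(buffer.ptr);
//         host.assign(ptr, ptr + buffer.size);   // double -> float, element by element
//         model = std::make_unique<KMeans_CUDA>(host.data(), N, D, K);
//     }
//
//     void one_epoch()               { model->one_epoch(); }
//     void fit(int epochs)           { model->fit(epochs); }
//     float compute_error()          { return model->compute_error(); }
//     std::vector<int> predictions() { return model->predictions(); }
// };
//
// PYBIND11_MODULE(KMeans_CUDA, m) {
//     py::class_<KMeansWrapper>(m, "KMeans")
//         .def(py::init<py::array_t<double>, int, int, int>())
//         .def("one_epoch", &KMeansWrapper::one_epoch)
//         .def("fit", &KMeansWrapper::fit)
//         .def("compute_error", &KMeansWrapper::compute_error)
//         .def("predictions", &KMeansWrapper::predictions);
// }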