Extension type instance inside CPython extension type

I have the following extension types in C:
// VECTOR 3
typedef struct
{
    PyObject_HEAD
    double x;
    double y;
    double z;
} Vec3Object;

static PyMemberDef Vec3Object_Members[] =
{
    {"x", T_DOUBLE, offsetof(Vec3Object, x), 0, "X component"},
    {"y", T_DOUBLE, offsetof(Vec3Object, y), 0, "Y component"},
    {"z", T_DOUBLE, offsetof(Vec3Object, z), 0, "Z component"},
    {NULL}
};

// TRANSFORMATIONS
typedef struct
{
    PyObject_HEAD
    Vec3Object position;
    Vec3Object rotation;
    Vec3Object scale;
} TransformObject;

// ENTITY
typedef struct
{
    PyObject_HEAD
    TransformObject transform;
} EntityObject;
And the following PyTypeObject (it's also the same for the others, with their respective members):
static PyTypeObject TransformObject_Type =
{
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "scene.Transform",
    .tp_doc = "Transform type",
    .tp_basicsize = sizeof(TransformObject),
    .tp_itemsize = 0,
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
    .tp_new = PyType_GenericNew,
    .tp_members = TransformObject_Members,
};
Creating a Vec3 works fine, but how would a PyMemberDef for TransformObject and EntityObject be defined?
I basically want to call the following Python code:
entity.transform.position.x = 12.5
entity.transform.rotation.y = -74.0
Or call methods like the following once they are in a PyMethodDef:
Vec3.Length(entity.transform.position)
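
One common approach (a sketch, untested): the Vec3Object values embedded by value in TransformObject are plain C structs, not separately allocated Python objects, so there is nothing a PyMemberDef entry could hand back to Python for them. Instead, store the fields as PyObject pointers, expose them with T_OBJECT_EX, and allocate them in tp_new. Vec3Object_Type is assumed here to be the PyTypeObject for Vec3Object defined elsewhere:

// Sketch: nested extension types held as PyObject pointers
// (Vec3Object_Type is assumed to be defined elsewhere).
typedef struct
{
    PyObject_HEAD
    PyObject *position;   /* Vec3Object */
    PyObject *rotation;   /* Vec3Object */
    PyObject *scale;      /* Vec3Object */
} TransformObject;

static PyMemberDef TransformObject_Members[] =
{
    {"position", T_OBJECT_EX, offsetof(TransformObject, position), 0, "Position vector"},
    {"rotation", T_OBJECT_EX, offsetof(TransformObject, rotation), 0, "Rotation vector"},
    {"scale",    T_OBJECT_EX, offsetof(TransformObject, scale),    0, "Scale vector"},
    {NULL}
};

static PyObject *Transform_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    TransformObject *self = (TransformObject *) type->tp_alloc(type, 0);
    if (self != NULL) {
        /* create the three Vec3 sub-objects up front */
        self->position = PyObject_CallObject((PyObject *) &Vec3Object_Type, NULL);
        self->rotation = PyObject_CallObject((PyObject *) &Vec3Object_Type, NULL);
        self->scale    = PyObject_CallObject((PyObject *) &Vec3Object_Type, NULL);
        if (!self->position || !self->rotation || !self->scale) {
            Py_DECREF(self);
            return NULL;
        }
    }
    return (PyObject *) self;
}

With .tp_new = Transform_new in TransformObject_Type, a matching tp_dealloc that Py_XDECREFs the three fields, and EntityObject built the same way around a single PyObject *transform, entity.transform.position.x = 12.5 then resolves attribute by attribute through the member definitions.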

Related

llvm: create struct type array (pointer)

I'm working with LLVM (frontend: C++) and trying to implement a struct-type array (struct-type pointer) in my language. My struct looks like this:
struct myStruct {
    double const *myArray; // double array
    char const *myString;  // string
};
And my code for the struct-type array implementation:
...
auto myString = Type::getInt8PtrTy(TheContext);
auto myArray = Type::getDoublePtrTy(TheContext);
auto myStruct = StructType::create(TheContext, "Struct");
myStruct->setBody({myString, myArray});
return PointerType::get(myStruct, 0);
I think this approach isn't correct, because when I assign data to the pointer and try to print the array elements, I get only the first struct array element, and only "myString" ("myArray" is always empty). I want to know what I'm doing wrong.
Additional code:
struct myStruct* getData(){
    double const arr1[3] = { 10, 7, 9 }; double const* arraypnt1 = arr1; char const *str1 = "string1";
    double const arr2[3] = { 1, 2, 4 };  double const* arraypnt2 = arr2; char const *str2 = "string2";
    struct myStruct p[2] = {{ arraypnt1, str1 }, { arraypnt2, str2 }};
    struct myStruct* pairarr = p; // array of pairs
    return pairarr;
}
// code generator
Value *ASTVarExpression::generateCode() {
    Function *function = Builder.GetInsertBlock()->getParent();
    Value *initValue = VariableValue->generateCode(); // this line creates the call to 'getData()'
    if (!initValue) { return nullptr; }
    AllocaInst *alloca = CreateFunctionEntryAlloca(function, VariableName, GetType(DeclaredType));
    Builder.CreateStore(initValue, alloca); // store the 'getData()' result in a variable of type 'myStruct *'
    NamedValues[VariableName] = alloca;
    return initValue;
}

static AllocaInst *CreateFunctionEntryAlloca(Function *function, StringRef variableName, Type *variableType) {
    IRBuilder<> tempBlock(&function->getEntryBlock(), function->getEntryBlock().begin());
    return tempBlock.CreateAlloca(variableType, 0, variableName);
}

Type *GetType(int expressionType) { // returns the struct array type
    auto myString = Type::getInt8PtrTy(TheContext);
    auto myArray = Type::getDoublePtrTy(TheContext);
    auto myStruct = StructType::create(TheContext, "Struct");
    myStruct->setBody({myString, myArray});
    return PointerType::get(myStruct, 0);
}
llvm IR code:
%varName = alloca %Struct.0*, align 8
%gencall = call %Struct* @getData()
store %Struct* %gencall, %Struct.0** %varName, align 8
ret %Struct* %gencall
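
Two things stand out in the posted code (observations, not a verified fix). First, the C struct declares myArray before myString, but setBody receives {myString, myArray}; the field indices generated for member accesses will then address the wrong members, which matches the symptom of one field coming out right and the other empty. Second, each call to StructType::create(TheContext, "Struct") makes a new, distinct type, which LLVM renames Struct.0, Struct.1, and so on; that is why the IR stores a %Struct* through a %Struct.0**. (Separately, getData returns pointers into stack arrays, which is undefined behavior in the C code itself.) A sketch that creates the type once, with the members in declaration order:

// Sketch: build the struct type once and reuse it; member order
// must mirror the C declaration { myArray, myString }.
static StructType *getMyStructTy(LLVMContext &ctx) {
    // StructType::getTypeByName is LLVM 12+; older releases look
    // types up through Module::getTypeByName instead.
    if (auto *existing = StructType::getTypeByName(ctx, "Struct"))
        return existing;
    auto *myArray  = Type::getDoublePtrTy(ctx); // double const *
    auto *myString = Type::getInt8PtrTy(ctx);   // char const *
    auto *st = StructType::create(ctx, "Struct");
    st->setBody({myArray, myString});
    return st;
}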

WebGL2 not writing second output of `out int[2]` result

When I read output from the fragment shader:
#version 300 es
precision highp float;
precision highp int;
out int outColor[2];
void main() {
    outColor[0] = 5;
    outColor[1] = 2;
}
rendered into a 32-bit integer RG texture, I find that only the 5s have been written but not the 2s. Presumably I've got some format specifier wrong somewhere, or I might be attaching the framebuffer to the wrong thing (gl.COLOR_ATTACHMENT0). I've tried varying various arguments, but most changes I make result in nothing coming out due to formats not lining up. It might be that I need to change three constants in tandem.
Here's my self-contained source. The output I want is an array alternating between 5 and 2. Instead, I get an array alternating between 5 and either semi-random large constants or 0.
let canvas /** @type {HTMLCanvasElement} */ = document.createElement('canvas');
let gl = canvas.getContext("webgl2");
let vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, `#version 300 es
in vec4 a_position;
void main() {
gl_Position = a_position;
}
`);
gl.compileShader(vertexShader);
console.assert(gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS), "Vertex shader compile failed.");
let fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, `#version 300 es
precision highp float;
precision highp int;
out int outColor[2];
void main() {
outColor[0] = 5;
outColor[1] = 2;
}
`);
gl.compileShader(fragmentShader);
let program = gl.createProgram();
gl.attachShader(program, vertexShader);
gl.attachShader(program, fragmentShader);
gl.linkProgram(program);
let positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([-3, -1, 1, 3, 1, -1]), gl.STATIC_DRAW);
let positionAttributeLocation = gl.getAttribLocation(program, "a_position");
let vao = gl.createVertexArray();
gl.bindVertexArray(vao);
gl.enableVertexAttribArray(positionAttributeLocation);
gl.vertexAttribPointer(positionAttributeLocation, 2, gl.FLOAT, false, 0, 0);
let w = 4;
let h = 4;
let texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RG32I, w, h, 0, gl.RG_INTEGER, gl.INT, null);
let frameBuffer = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
gl.useProgram(program);
gl.viewport(0, 0, w, h);
gl.drawArrays(gl.TRIANGLES, 0, 3);
let outputBuffer = new Int32Array(w*h*2);
gl.readPixels(0, 0, w, h, gl.RG_INTEGER, gl.INT, outputBuffer);
console.log(outputBuffer);
Arrayed outputs like `out int outColor[2];` are used for writing to multiple render targets: in your case, two render targets with one channel each, because you've used a scalar type.
To express a single render target with two channels, use `out ivec2 outColor;` instead.
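For reference, a minimal corrected fragment shader under that suggestion (a sketch; the rest of the setup, including the gl.RG32I texture and the gl.RG_INTEGER/gl.INT readback, stays the same):
#version 300 es
precision highp float;
precision highp int;
out ivec2 outColor;   // one render target, two integer channels
void main() {
    outColor = ivec2(5, 2);
}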

How can I convert CGAL points into PCL point cloud?

I would like to use functions and classes from both libraries (CGAL and PCL), so I need to convert the processed data from one into the other.
So, how can I convert points in CGAL into a point cloud in PCL, and vice versa?
Given the headers:
#include <pcl/point_types.h>
#include <pcl/conversions.h>
#include <CGAL/Surface_mesh.h>
#include <CGAL/Simple_cartesian.h>
Here is a function to convert from PCL to CGAL:
int convert_mesh_from_PCL_to_CGAL(pcl::PolygonMesh::Ptr PCL_mesh, CGAL_Mesh& CGAL_mesh)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr mesh_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromPCLPointCloud2( PCL_mesh->cloud, *mesh_cloud );

    // clear the mesh and reserve space for vertices, halfedges, and faces
    CGAL_mesh.clear();
    int n = mesh_cloud->size();
    int f = PCL_mesh->polygons.size();
    int e = 0;
    CGAL_mesh.reserve(n, 2*f, e);

    // copy the vertices
    double x, y, z;
    for (int i = 0; i < mesh_cloud->size(); i++)
    {
        Point p;
        x = mesh_cloud->points[i].x;
        y = mesh_cloud->points[i].y;
        z = mesh_cloud->points[i].z;
        p = Point(x, y, z);
        CGAL_mesh.add_vertex(p);
    }

    // copy the faces
    std::vector<int> vertices;
    for (int i = 0; i < PCL_mesh->polygons.size(); i++)
    {
        vertices.resize(3);
        vertices[0] = PCL_mesh->polygons[i].vertices[0];
        vertices[1] = PCL_mesh->polygons[i].vertices[1];
        vertices[2] = PCL_mesh->polygons[i].vertices[2];
        CGAL_mesh.add_face(CGAL_Mesh::Vertex_index(vertices[0]),
                           CGAL_Mesh::Vertex_index(vertices[1]),
                           CGAL_Mesh::Vertex_index(vertices[2]));
    }
    return 0;
}
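The CGAL_Mesh and Point aliases used above are not defined in the post; given the headers, they are presumably something along these lines:
// Assumed typedefs, not part of the original answer:
using Kernel    = CGAL::Simple_cartesian<double>;
using Point     = Kernel::Point_3;
using CGAL_Mesh = CGAL::Surface_mesh<Point>;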
For CGAL to PCL I had some commented-out code; I'll try to test it and update this later, but it might give you an idea of how to do it.
int convert_mesh_from_CGAL_to_PCL(CGAL_Mesh CGAL_mesh, pcl::PolygonMesh::Ptr old_PCL_mesh, pcl::PolygonMesh::Ptr PCL_mesh)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr mesh_cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromPCLPointCloud2( old_PCL_mesh->cloud, *mesh_cloud );

    int i = 0;
    BOOST_FOREACH(CGAL_vertex v, vertices(CGAL_mesh))
    {
        mesh_cloud->points[i].x = CGAL_mesh[v].point.x();
        mesh_cloud->points[i].y = CGAL_mesh[v].point.y();
        mesh_cloud->points[i].z = CGAL_mesh[v].point.z();
        i++;
    }
    //BOOST_FOREACH(CGAL_vertex v, vertices(CGAL_mesh))
    //BOOST_FOREACH(CGAL_face f, faces(CGAL_mesh))
    pcl::toPCLPointCloud2( *mesh_cloud, PCL_mesh->cloud );
    return 0;
}
It's not that complicated. Here's a simple example:
First, some headers you might need:
#include <CGAL/Simple_cartesian.h>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <tuple>   // std::tuple
#include <array>   // std::array, for Color
#include <vector>
Some using declarations make life easy.
(CGAL examples tend to use typedefs a lot, like this one.)
// CGAL using declarations
using Kernel = CGAL::Simple_cartesian<float>;
using Point = Kernel::Point_3;
using Vector = Kernel::Vector_3;
using Color = std::array<unsigned char, 3>;
using PNC = std::tuple<Point, Vector, Color>;
// PCL using declarations
using Cloud = pcl::PointCloud<pcl::PointXYZRGBNormal>;
To convert CGAL points to PCL point cloud:
Cloud cgal2pcl(const std::vector<PNC> & points)
{
    Cloud cloud;
    for (const auto & pnc : points) {
        const Point & p = std::get<0>(pnc);
        const Vector & v = std::get<1>(pnc);
        const Color & c = std::get<2>(pnc);

        pcl::PointXYZRGBNormal pt;
        pt.x = p.x();
        pt.y = p.y();
        pt.z = p.z();
        pt.normal_x = v.x();
        pt.normal_y = v.y();
        pt.normal_z = v.z();
        pt.r = c[0];
        pt.g = c[1];
        pt.b = c[2];
        cloud.points.push_back(pt);
    }
    return cloud;
}
And from PCL point cloud to CGAL points:
std::vector<PNC> pcl2cgal(const Cloud & cloud)
{
    std::vector<PNC> points;
    points.reserve(cloud.points.size());
    for (const auto & pt : cloud.points) {
        Point p (pt.x, pt.y, pt.z);
        Vector n (pt.normal_x, pt.normal_y, pt.normal_z);
        Color c { {pt.r, pt.g, pt.b} };
        points.push_back(std::make_tuple(p, n, c));
    }
    return points;
}
For your concrete example, you might want to change std::tuple<Point, Vector, Color> to std::pair<Point, Vector> to store pcl::PointNormal.
And if you need pcl::PointXYZ, then std::vector<Point> might just meet the demands.
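For instance, a sketch of the pcl::PointNormal variant (untested, following the same pattern as cgal2pcl above):
// Point + normal, no color (assumed variant of the code above).
using PN = std::pair<Point, Vector>;

pcl::PointCloud<pcl::PointNormal> cgal2pcl_pn(const std::vector<PN> & points)
{
    pcl::PointCloud<pcl::PointNormal> cloud;
    cloud.points.reserve(points.size());
    for (const auto & pn : points) {
        pcl::PointNormal pt;
        pt.x = pn.first.x();
        pt.y = pn.first.y();
        pt.z = pn.first.z();
        pt.normal_x = pn.second.x();
        pt.normal_y = pn.second.y();
        pt.normal_z = pn.second.z();
        cloud.points.push_back(pt);
    }
    return cloud;
}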

Passing data to nlopt in Rcpp?

This is a rather simple question, but I haven't been able to quite find the answer on the web yet.
With my latest attempt, here is the latest compiler output:
note: candidate function not viable: no known conversion from 'double (unsigned int, const double *, void *, void *)' to 'nlopt_func' (aka 'double (*)(unsigned int, const double *, double *, void *)') for 2nd argument
From this error I surmise that I am now wrapping or 'type casting' the data argument correctly, and also the parameter vector. The discrepancy in the third input, the gradient, confuses me, as I am calling a gradient-free optimization routine.
Here is a simple linear regression with a constant and a variable:
#include "RcppArmadillo.h"
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(nloptr)]]
//#include <vector>
#include <nloptrAPI.h>
using namespace arma;
using namespace Rcpp;
typedef struct {
    arma::mat data_in;
} *my_func_data;

typedef struct {
    double a, b;
} my_theta;

double myfunc(unsigned n, const double *theta, void *grad, void *data){
    my_func_data &temp = (my_func_data &) data;
    arma::mat data_in = temp->data_in;
    my_theta *theta_temp = (my_theta *) theta;
    double a = theta_temp->a, b = theta_temp->b;
    int Len = arma::size(data_in)[0];
    arma::vec Y1 = data_in(span(0, Len-1), 1);
    arma::vec Y2 = data_in(span(0, Len-1), 2);
    arma::vec res = data_in(span(0, Len-1), 0) - a*Y1 - b*Y2;
    return sum(res);
}

// [[Rcpp::export]]
void test_nlopt_c() {
    arma::mat data_in(10,3);
    data_in(span(0,9),0) = arma::regspace(40, 49);
    data_in(span(0,9),1) = arma::ones(10);
    data_in(span(0,9),2) = arma::regspace(10, 19);
    my_func_data &temp = (my_func_data &) data_in;

    double lb[2] = { 0, 0 };                    /* lower bounds */
    nlopt_opt opt;
    opt = nlopt_create(NLOPT_LN_NELDERMEAD, 2); /* algorithm and dimensionality */
    nlopt_set_lower_bounds(opt, lb);
    nlopt_set_min_objective(opt, myfunc, &data_in);
    nlopt_set_xtol_rel(opt, 1e-4);
    double minf;                                /* the minimum objective value, upon return */
    double x[2] = {0.5, 0.5};                   /* some initial guess */
    nlopt_result result = nlopt_optimize(opt, x, &minf);
    Rcpp::Rcout << "result:" << result;
    return;
}
Got it figured out; the obvious answer turns out to be correct: just change 'void' to 'double' in the third argument. (That argument is the gradient output array in the nlopt_func signature, double *grad; gradient-free algorithms simply never write to it, but the function pointer type still has to match.) Anyway, the example code needs some improving, but it works.
#include "RcppArmadillo.h"
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::depends(nloptr)]]
//#include <vector>
#include <nloptrAPI.h>
using namespace arma;
using namespace Rcpp;
typedef struct {
    arma::mat data_in;
} *my_func_data;

typedef struct {
    double a, b;
} my_theta;

double myfunc(unsigned n, const double *theta, double *grad, void *data){
    my_func_data &temp = (my_func_data &) data;
    arma::mat data_in = temp->data_in;
    my_theta *theta_temp = (my_theta *) theta;
    double a = theta_temp->a, b = theta_temp->b;
    int Len = arma::size(data_in)[0];
    arma::vec Y1 = data_in(span(0, Len-1), 1);
    arma::vec Y2 = data_in(span(0, Len-1), 2);
    arma::vec res = data_in(span(0, Len-1), 0) - a*Y1 - b*Y2;
    return sum(res);
}

// [[Rcpp::export]]
void test_nlopt_c() {
    arma::mat data_in(10,3);
    data_in(span(0,9),0) = arma::regspace(40, 49);
    data_in(span(0,9),1) = arma::ones(10);
    data_in(span(0,9),2) = arma::regspace(10, 19);
    my_func_data &temp = (my_func_data &) data_in;

    double lb[2] = { 0, 0 };                    /* lower bounds */
    nlopt_opt opt;
    opt = nlopt_create(NLOPT_LN_NELDERMEAD, 2); /* algorithm and dimensionality */
    nlopt_set_lower_bounds(opt, lb);
    nlopt_set_min_objective(opt, myfunc, &data_in);
    nlopt_set_xtol_rel(opt, 1e-4);
    double minf;                                /* the minimum objective value, upon return */
    double x[2] = {0.5, 0.5};                   /* some initial guess */
    nlopt_result result = nlopt_optimize(opt, x, &minf);
    Rcpp::Rcout << "result:" << result;
    return;
}
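One further aside (a sketch, not from the original post): casting the arma::mat itself to my_func_data, as both listings do, only appears to work because the struct's single member happens to line up with the matrix in memory. Packing the matrix into the struct the callback actually dereferences, and reading theta as the plain double array nlopt passes, avoids both type puns:
// Sketch: pass the callback a real struct instead of punning the matrix.
struct func_payload { arma::mat data_in; };

double myfunc2(unsigned n, const double *theta, double *grad, void *data) {
    func_payload *p = static_cast<func_payload *>(data);
    arma::mat &m = p->data_in;
    double a = theta[0], b = theta[1];   // theta is just a double array
    arma::vec res = m.col(0) - a * m.col(1) - b * m.col(2);
    return arma::sum(res);
}

// in test_nlopt_c():
// func_payload payload{data_in};
// nlopt_set_min_objective(opt, myfunc2, &payload);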

How do you read arguments passed to a native kernel?

I have a native kernel setup but I don't know how to convert its void* argument into anything useful. In the native kernel of this snippet, how would I get the int (7) or the int[] (16 ints set to 0)?
void __stdcall nativeKernel(void * args)
{
    int a1 = (*(int*)args);
    cout << "a1-->: " << a1 << endl; // gibberish
}

void kernelCaller()
{
    const int dim1Size = 16;
    int dim1[dim1Size] = {};
    cl_int status = 0;
    cl_mem mem_d1 = clCreateBuffer(*context, 0, sizeof(int)*dim1Size, NULL, &status);
    clEnqueueWriteBuffer(*queue, mem_d1, CL_TRUE, 0, sizeof(int)*dim1Size, dim1, 0, NULL, NULL);

    const void* args[2] = {(void*)7, NULL};
    cl_mem mem_list[1] = {mem_d1};
    const void* args_mem_loc[1] = {&args[1]};

    cl_event run;
    status = clEnqueueNativeKernel(*queue, nativeKernel, args, 2, 1, mem_list, args_mem_loc, 0, NULL, &run);
    status = clEnqueueReadBuffer(*queue, mem_d1, CL_TRUE, 0, sizeof(int)*dim1Size, dim1, 1, &run, NULL);

    for(auto i = 0; i != dim1Size; i++)
        cout << dim1[i] << " ";
}
Instead of wrestling with void*, I would suggest using a struct.
Create your parameter structure like:
struct myparams{
    int a;
    int b[3];
};
and then create and fill one struct myparams in your program, passing its address to clEnqueueNativeKernel:
struct myparams params;
params.a = 3;
status = clEnqueueNativeKernel(*queue, nativeKernel, (void*)&params, sizeof(params), 1, mem_list, args_mem_loc, 0, NULL, &run);
and in the nativeKernel just unbox the void* into your parameter struct:
struct myparams *params = (struct myparams *) args;
Beware: in the example above I passed a pointer to stack memory... you might not want that ;)
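To tie it back to the original snippet, a sketch (untested) of reading both members inside the native kernel:
// Sketch: unbox the args pointer and read both fields.
void __stdcall nativeKernel(void *args)
{
    struct myparams *params = (struct myparams *) args;
    cout << "a --> " << params->a << endl;
    for (int i = 0; i < 3; i++)    // b has 3 elements in this sketch
        cout << params->b[i] << " ";
    cout << endl;
}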
