So I've been trying to put labels on my objects, and I set up a test example that just draws one simple label. When I ran the program, however, all the objects became somewhat transparent.
paintGL:
void mainWidget::paintGL() {
    QPainter painter(this);
    painter.beginNativePainting();

    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glViewport(0, 0, width(), height());
    projection = camera->getProjectionMatrix(60.0f, width(), height(), 0.1f, 100.0f);
    vec3 cameraPos(camera->getPosition());
    view = camera->getViewMatrix();
    glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);

    if (isLine)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    // Cube
    shaderProgram->bind();
    shaderProgram->setUniformValue("Light.ambient", 0.1f);
    shaderProgram->setUniformValue("Light.position", QVector3D(ld[0], ld[1], ld[2]));
    model = mat4(1.0f);
    model = glm::translate(model, modelOrigin + cubePosModel);
    setRotModel(cubeRotModel);
    setMatrices(shaderProgram, true);
    shaderProgram->setUniformValue("viewPosition", QVector3D(cameraPos[0], cameraPos[1], cameraPos[2]));
    if (showCube)
        cubeMesh->render(shaderProgram);
    shaderProgram->release();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    if (isLine)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    // Plane
    view = camera->getViewMatrix();
    model = mat4(1.0f);
    model = glm::translate(model, modelOrigin + planePosModel);
    setRotModel(planeRotModel);
    shaderProgram->bind();
    setMatrices(shaderProgram, true);
    shaderProgram->setUniformValue("viewPosition", QVector3D(cameraPos[0], cameraPos[1], cameraPos[2]));
    if (showPlane)
        planeMesh->render(shaderProgram);
    shaderProgram->release();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    if (isLine)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    // Teapot
    model = glm::mat4(1.0f);
    model = glm::translate(model, modelOrigin + teapotPosModel);
    setRotModel(teapotRotModel);
    shaderProgram->bind();
    setMatrices(shaderProgram, true);
    if (showTeapot)
        teapotMesh->render(shaderProgram);
    shaderProgram->release();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    // Light cube
    model = glm::mat4(1.0f);
    model = glm::translate(model, modelOrigin + cubeLightPosModel);
    model = glm::scale(model, glm::vec3(0.3f));
    setRotModel(cubeLightRotModel);
    lightCubeProgram->bind();
    setMatrices(lightCubeProgram, false);
    if (showCubeLight)
        cubeMesh->render(lightCubeProgram);
    lightCubeProgram->release();

    // Axis grid
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
    axis_vao->bind();
    model = mat4(1.0f);
    model = glm::translate(model, modelOrigin);
    model = glm::scale(model, vec3(5.0f));
    model = glm::rotate(model, glm::radians(2.0f), vec3(1, 1, 0));
    drawAxisProgram->bind();
    glm::mat4 pvm = projection * view * model;
    drawAxisProgram->setUniformValue("mvpMatrix", QMatrix4x4(glm::value_ptr(pvm)).transposed());
    if (showGrid)
        glDrawArrays(GL_LINES, 0, axisSize);
    axis_vao->release();
    drawAxisProgram->release();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    if (isLine)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    // Sphere
    model = mat4(1.0f);
    model = glm::translate(model, modelOrigin + vec3(0, 2, -1));
    shaderProgram->bind();
    setMatrices(shaderProgram, true);
    if (showSphere)
        sphereMesh->render(shaderProgram);
    shaderProgram->release();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    painter.endNativePainting();

    // 2D text overlay
    painter.setPen(Qt::yellow);
    painter.setFont(QFont("Helvetica", 8));
    painter.setRenderHints(QPainter::Antialiasing | QPainter::TextAntialiasing);
    painter.drawText(200, 400, "Scene Maker"); // z = pointT4.z + distOverOp / 4
    painter.end();
}
I don't know why, but at first it looked like the teapot was the only transparent object. Also, adding the QPainter messed something up with the lid of the teapot. Perhaps it's related to all the glPolygonMode switching going on?
In case it was QPainter sneakily disabling GL_DEPTH_TEST, which seemed to be the case, I re-enabled it afterwards, but it still didn't work.
mainWidget is a QOpenGLWidget.
So, what is the problem?
Pic: (screenshot omitted)
(As you can see, the grid lines show through the teapot but not through the cube, so the teapot isn't fully opaque.)
Edit: I found out that all the other objects were transparent too.
Your depth test is disabled, or your context doesn't have a depth buffer. First, make sure that you set a pixel format with a depth buffer during initialization. The QOpenGLWidget documentation has an example of that:
QOpenGLWidget *widget = new QOpenGLWidget(parent);
QSurfaceFormat format;
format.setDepthBufferSize(24); // <--- this is what you need
format.setStencilBufferSize(8);
format.setVersion(3, 2);
format.setProfile(QSurfaceFormat::CoreProfile);
widget->setFormat(format); // must be called before the widget or its parent window gets shown
Next, enable the depth test in your drawing code with:
glEnable(GL_DEPTH_TEST);
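Note that QPainter itself changes GL state when it draws: on a QOpenGLWidget, its paint engine can leave depth testing disabled, so enabling it once during initialization is not enough. A minimal sketch of a paintGL() skeleton that mixes QPainter and native GL (reusing the member names from the question):
void mainWidget::paintGL() {
    QPainter painter(this);
    painter.beginNativePainting();
    // Re-enable depth testing every frame: the QPainter paint engine
    // may have turned it off while drawing the previous frame's overlay.
    glEnable(GL_DEPTH_TEST);
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // ... native GL draw calls go here ...
    painter.endNativePainting();
    // ... QPainter overlay text goes here ...
    painter.end();
}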
When I try to create a 2x8 R8 texture in WebGL2, I get an error. This doesn't happen for a 4x8 texture. If I double the size of the input buffer compared to what I expect it should need, the 2x8 succeeds.
Does WebGL2 have a 'column alignment' of 4 when creating/reading textures?
Here is some code that reproduces the issue. I tested it on Windows in both Chrome and Firefox:
function test_read(w) {
  let gl = document.createElement('canvas').getContext('webgl2');
  let h = 8;
  let data = new Uint8Array(w*h);
  data[5] = 5;
  let texture = gl.createTexture();
  let frameBuffer = gl.createFramebuffer();
  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.R8, w, h, 0, gl.RED, gl.UNSIGNED_BYTE, data);
  if (gl.getError() !== gl.NO_ERROR) {
    return 'bad w=' + w;
  }
  return 'good w=' + w;
}
console.log(test_read(4)); // good w=4
console.log(test_read(2)); // bad w=2
The error code coming out is 0x502 (INVALID_OPERATION). A similar issue happens when reading textures that were created by expanding the buffer: it seems to expect a 'column alignment' of 4.
You need to set gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1).
The default UNPACK_ALIGNMENT is 4, which means WebGL expects every row of pixels to be a multiple of 4 bytes. Since you're using R8 (a 1-byte pixel) and a width of 2, your rows are only 2 bytes long. When you change the width to 4, each row is 4 bytes, so it starts working.
function test_read(w) {
  let gl = document.createElement('canvas').getContext('webgl2');
  let h = 8;
  let data = new Uint8Array(w*h);
  data[5] = 5;
  // ---=== ADDED ===---
  gl.pixelStorei(gl.UNPACK_ALIGNMENT, 1);
  let texture = gl.createTexture();
  let frameBuffer = gl.createFramebuffer();
  gl.bindTexture(gl.TEXTURE_2D, texture);
  gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.R8, w, h, 0, gl.RED, gl.UNSIGNED_BYTE, data);
  if (gl.getError() !== gl.NO_ERROR) {
    return 'bad w=' + w;
  }
  return 'good w=' + w;
}
console.log(test_read(4)); // good w=4
console.log(test_read(2)); // good w=2 (no longer fails with UNPACK_ALIGNMENT = 1)
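For reference, here is a sketch of the arithmetic behind the alignment rule: every row except the last is padded up to a multiple of UNPACK_ALIGNMENT, which is why the 2x8 upload wants more than w*h bytes (the helper name is made up for illustration):
// Bytes texImage2D expects for a 1-byte-per-pixel format like gl.R8:
// every row except the last is padded to a multiple of `alignment`.
function expectedBytes(width, height, bytesPerPixel, alignment) {
  const rowBytes = width * bytesPerPixel;
  const stride = Math.ceil(rowBytes / alignment) * alignment;
  return stride * (height - 1) + rowBytes;
}
console.log(expectedBytes(2, 8, 1, 4)); // 30 -> a 16-byte buffer is too small: INVALID_OPERATION
console.log(expectedBytes(2, 8, 1, 1)); // 16 -> exactly w*h, so the upload succeeds
The same rule applies in the other direction when reading pixels back: if your rows are not multiples of 4 bytes, set gl.pixelStorei(gl.PACK_ALIGNMENT, 1) before calling gl.readPixels.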
As soon as I set glEnable(GL_DEPTH_TEST) in the following code, nothing except for the clear color gets drawn on screen.
window.cpp
void Window::initializeGL() {
    makeCurrent();
    initializeOpenGLFunctions();
    glClearColor(0.0f, 0.03f, 0.2f, 1.0f);
    Shaders::initShaders();
    glEnable(GL_DEPTH_TEST);
    currentLevel.addTiles();
    viewMatrix.setToIdentity();
    viewMatrix.translate(0.0f, 0.0f, -8.0f);
    viewMatrix.rotate(45.0f, -1.0f, 0.0f, 0.0f);
}

void Window::resizeGL(int width, int height) {
    const float fov = 45.0f,
                zNear = 0.0f,
                zFar = 1000.0f;
    projectionMatrix.setToIdentity();
    projectionMatrix.perspective(fov, width / float(height), zNear, zFar);
}

void Window::paintGL() {
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    currentLevel.setProjectionMatrix(projectionMatrix);
    currentLevel.setViewMatrix(viewMatrix);
    currentLevel.draw();
}
tile.cpp:
// Constructor with custom shader
Tile::Tile(QVector3D v1, QVector3D v2, QVector3D v3, QVector3D v4, QOpenGLShaderProgram* shaderProgram) :
    vbo(QOpenGLBuffer::VertexBuffer),
    cbo(QOpenGLBuffer::VertexBuffer),
    ibo(QOpenGLBuffer::IndexBuffer) {
    // Calculate surface normal & second set of vertices
    QVector3D surfaceNormal = QVector3D::normal(v1, v2, v3);
    QVector3D v1_n = v1 - surfaceNormal;
    QVector3D v2_n = v2 - surfaceNormal;
    QVector3D v3_n = v3 - surfaceNormal;
    QVector3D v4_n = v4 - surfaceNormal;

    // Set up rectangular mesh from given corner vectors
    vertices = {
        v1, v2, v3, v4,
        v1_n, v2_n, v3_n, v4_n
    };
    colors = {
        color1, color1, color2, color2,
        color1, color1, color2, color2
    };
    indices = {
        // Face 1
        0, 1, 2,
        2, 3, 0,
        // Face 2
        0, 4, 5,
        5, 1, 0,
        // Face 3
        4, 5, 6,
        6, 7, 4
    };

    this->shaderProgram = shaderProgram;
    cacheShaderLocations();
    createBuffers();
}

Tile::~Tile() {
    vbo.destroy();
    vao.destroy();
    ibo.destroy();
}

// Cache Uniform Locations for shaders
void Tile::cacheShaderLocations() {
    positionLocation = shaderProgram->attributeLocation("position");
    colorLocation = shaderProgram->attributeLocation("color");
    modelLocation = shaderProgram->uniformLocation("model");
    viewLocation = shaderProgram->uniformLocation("view");
    projectionLocation = shaderProgram->uniformLocation("projection");
}

// Create buffers
void Tile::createBuffers() {
    // Vertex Buffer Object
    vbo.create();
    vbo.bind();
    vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
    vbo.allocate(vertices.constData(), vertices.size() * sizeof(QVector3D));
    vbo.release();
    // Color Buffer Object
    cbo.create();
    cbo.bind();
    cbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
    cbo.allocate(colors.constData(), colors.size() * sizeof(QVector3D));
    cbo.release();
    // Index Buffer Object
    ibo.create();
    ibo.bind();
    ibo.setUsagePattern(QOpenGLBuffer::StaticDraw);
    ibo.allocate(indices.constData(), indices.size() * sizeof(GLushort));
    ibo.release();

    // Vertex Array Object
    vao.create();
    // Setup buffer attributes
    shaderProgram->bind();
    vao.bind();
    vbo.bind();
    shaderProgram->enableAttributeArray(positionLocation);
    shaderProgram->setAttributeBuffer(positionLocation, GL_FLOAT, 0, 3, 0);
    cbo.bind();
    shaderProgram->enableAttributeArray(colorLocation);
    shaderProgram->setAttributeBuffer(colorLocation, GL_FLOAT, 0, 3, 0);
    ibo.bind();
    vao.release();
    // Release buffers & shader program
    vbo.release();
    cbo.release();
    ibo.release();
    shaderProgram->release();
}

void Tile::draw() {
    shaderProgram->bind();
    // Send uniforms to shader
    shaderProgram->setUniformValue(projectionLocation, projectionMatrix);
    shaderProgram->setUniformValue(viewLocation, viewMatrix);
    shaderProgram->setUniformValue(modelLocation, modelMatrix);
    // Draw vertices
    vao.bind();
    glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_SHORT, 0);
    vao.release();
    shaderProgram->release();
}
As for the depth buffer itself, according to the Qt documentation it is enabled by default. In main.cpp I set up the surface format/context like this:
// Set OpenGL Version information
QSurfaceFormat format;
format.setDepthBufferSize(24);
format.setRenderableType(QSurfaceFormat::OpenGL);
format.setProfile(QSurfaceFormat::CoreProfile);
format.setVersion(3,3);
I really have no clue why nothing gets drawn when I try to use depth testing, so I would greatly appreciate any help.
I use an OpenGL shader to apply a median filter to an image. I copy the input image into the in_fbo buffer, and that all works fine.
QGLFramebufferObject *in_fbo, *out_fbo;
painter.begin(in_fbo); //Copy QImage to QGLFramebufferObject
painter.drawImage(0,0,image_in,0,0,width,height);
painter.end();
out_fbo->bind();
glViewport( 0, 0, nWidth, nHeight );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho( 0.0, nWidth, 0.0, nHeight, -1.0, 1.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity( );
glEnable( GL_TEXTURE_2D );
out_fbo->drawTexture( QPointF(0.0,0.0), in_fbo->texture( ), GL_TEXTURE_2D );
But in the shader code I need to divide the vertex position by the width and height of the image, because texture coordinates are normalized to the range between 0 and 1.
How do I correctly calculate the texture coordinates?
//vertex shader
varying vec2 pos;
void main( void )
{
    pos = gl_Vertex.xy;
    gl_Position = ftransform( );
}

//fragment shader
#extension GL_ARB_texture_rectangle : enable
uniform sampler2D texture0;
uniform int imgWidth;
uniform int imgHeight;
uniform int len;
varying vec2 pos;
#define MAX_LEN (100)
void main(){
    float v[ MAX_LEN ];
    for (int i = 0; i < len; i++) {
        vec2 posi = pos + float(i);
        posi.x = posi.x / float( imgWidth );
        posi.y = posi.y / float( imgHeight );
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value
    //
    gl_FragColor = vec4( m, m, m, 1.0 );
}
I did this before in OpenFrameworks, but the shader that worked for a texture in OF does not work for a texture in Qt. I suppose that's because OF creates textures with textureTarget = GL_TEXTURE_RECTANGLE_ARB. Now the result of applying the shader above isn't correct: it isn't identical to the result of the old shader (there are a few pixels with different colors), and I don't know how to modify the shader above.
Old shaders:
//vertex
#version 120
#extension GL_ARB_texture_rectangle : enable
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;
    gl_FrontColor = gl_Color;
}

//fragment
#version 120
#extension GL_ARB_texture_rectangle : enable
#define MAX_LEN (100) // assumed: this define was missing from the posted snippet; value mirrors the new shader
uniform sampler2D texture0;
uniform int len;
void main(){
    vec2 pos = gl_TexCoord[0].xy;
    pos.x = int( pos.x );
    pos.y = int( pos.y );
    float v[ MAX_LEN ];
    for (int i = 0; i < len; i++) {
        vec2 posi = pos + i;
        posi.x = int( posi.x + 0.5 ) + 0.5;
        posi.y = int( posi.y + 0.5 ) + 0.5;
        v[i] = texture2D(texture0, posi).r;
    }
    //
    //.... Calculating new value
    //
    gl_FragColor = vec4( m, m, m, 1.0 );
}
OpenGL code from OpenFrameworks lib
texData.width = w;
texData.height = h;
texData.tex_w = w;
texData.tex_h = h;
texData.textureTarget = GL_TEXTURE_RECTANGLE_ARB;
texData.bFlipTexture = true;
texData.glType = GL_RGBA;
// create & setup FBO
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
// Create the render buffer for depth
glGenRenderbuffersEXT(1, &depthBuffer);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBuffer);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, texData.tex_w, texData.tex_h);
// create & setup texture
glGenTextures(1, (GLuint *)(&texData.textureID)); // could be more then one, but for now, just one
glBindTexture(texData.textureTarget, (GLuint)(texData.textureID));
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(texData.textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(texData.textureTarget, 0, texData.glType, texData.tex_w, texData.tex_h, 0, texData.glType, GL_UNSIGNED_BYTE, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
// attach it to the FBO so we can render to it
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, texData.textureTarget, (GLuint)texData.textureID, 0);
I do not think you actually want to use the texture's dimensions to do this. From the sounds of things, this is a simple fullscreen image filter and you really just want fragment coordinates mapped into the range [0.0, 1.0]. If this is the case, then gl_FragCoord.xy / viewport.xy, where viewport is a 2D uniform that defines the width and height of your viewport, ought to work for your texture coordinates (in the fragment shader).
vec2 texCoord = vec2 (transformed_pos.x, transformed_pos.y) / transformed_pos.w * vec2 (0.5, 0.5) + vec2 (0.5, 0.5) may also work using the same principle -- clip-space coordinates transformed into NDC and then mapped to texture space. However, this approach will not properly account for texel centers ((0.5, 0.5) rather than (0.0, 0.0)) and can present problems when texture filtering is enabled and the wrap mode is not GL_CLAMP_TO_EDGE.
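Here is a minimal fragment-shader sketch of the gl_FragCoord approach; the viewport uniform (and its name) is an assumption you would have to supply from the application side:
#version 120
uniform sampler2D texture0;
uniform vec2 viewport; // assumed uniform: viewport width and height in pixels
void main() {
    // gl_FragCoord.xy is in window space with fragment centers at (x+0.5, y+0.5),
    // so dividing by the viewport size gives [0,1] coordinates that fall on
    // texel centers when the texture has the same size as the viewport.
    vec2 texCoord = gl_FragCoord.xy / viewport;
    gl_FragColor = texture2D(texture0, texCoord);
}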
I would like to display sample6 of the OptixSDK in a QGLWidget.
My application has only 3 QSlider for the rotation around the X,Y,Z axis and the QGLWidget.
As I understand it, paintGL() gets called whenever updateGL() is triggered by my QSliders or by mouse events. Then I initialize a rotation matrix and apply it to the PinholeCamera, so that the scene is traced with the newly transformed camera coordinates, right?
When tracing is finished, I get the output buffer and use it to draw the pixels with glDrawPixels(), just like in the GLUTdisplay.cpp provided with the OptiX framework.
My issue is that the image is skewed/distorted. For example, I wanted to display a ball, but the ball comes out extremely flattened, even though the rotation works fine.
When I zoom out, the image seems to scale much more slowly horizontally than vertically.
I am almost sure (or at least hope) that it has something to do with the gl*() functions not being used properly. What am I missing? Can someone help me out?
For completeness, here are my initializeGL() and paintGL() implementations.
void MyGLWidget::initializeGL()
{
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    m_scene = new MeshViewer();
    m_scene->setMesh( (std::string( sutilSamplesDir() ) + "/ball.obj").c_str());
    int buffer_width, buffer_height;

    // Set up scene
    SampleScene::InitialCameraData initial_camera_data;
    m_scene->setUseVBOBuffer( false );
    m_scene->initScene( initial_camera_data );

    int m_initial_window_width = 400;
    int m_initial_window_height = 400;
    if( m_initial_window_width > 0 && m_initial_window_height > 0)
        m_scene->resize( m_initial_window_width, m_initial_window_height );

    // Initialize camera according to scene params
    m_camera = new PinholeCamera( initial_camera_data.eye,
                                  initial_camera_data.lookat,
                                  initial_camera_data.up,
                                  -1.0f, // hfov is ignored when using keep vertical
                                  initial_camera_data.vfov,
                                  PinholeCamera::KeepVertical );

    Buffer buffer = m_scene->getOutputBuffer();
    RTsize buffer_width_rts, buffer_height_rts;
    buffer->getSize( buffer_width_rts, buffer_height_rts );
    buffer_width = static_cast<int>(buffer_width_rts);
    buffer_height = static_cast<int>(buffer_height_rts);

    float3 eye, U, V, W;
    m_camera->getEyeUVW( eye, U, V, W );
    SampleScene::RayGenCameraData camera_data( eye, U, V, W );

    // Initial compilation
    m_scene->getContext()->compile();

    // Accel build
    m_scene->trace( camera_data );
    m_scene->getContext()->launch( 0, 0 );

    // Initialize state
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, 1, 0, 1, -1, 1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glViewport(0, 0, buffer_width, buffer_height);
}
And here is paintGL()
void MyGLWidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();

    float3 eye, U, V, W;
    m_camera->getEyeUVW( eye, U, V, W );
    SampleScene::RayGenCameraData camera_data( eye, U, V, W );
    {
        nvtx::ScopedRange r( "trace" );
        m_scene->trace( camera_data );
    }

    // Draw the resulting image
    Buffer buffer = m_scene->getOutputBuffer();
    RTsize buffer_width_rts, buffer_height_rts;
    buffer->getSize( buffer_width_rts, buffer_height_rts );
    int buffer_width = static_cast<int>(buffer_width_rts);
    int buffer_height = static_cast<int>(buffer_height_rts);
    RTformat buffer_format = buffer.get()->getFormat();

    GLvoid* imageData = buffer->map();
    assert( imageData );

    switch (buffer_format) {
        /*... set gl_data_type and gl_format ...*/
    }

    RTsize elementSize = buffer->getElementSize();
    int align = 1;
    if      ((elementSize % 8) == 0) align = 8;
    else if ((elementSize % 4) == 0) align = 4;
    else if ((elementSize % 2) == 0) align = 2;
    glPixelStorei(GL_UNPACK_ALIGNMENT, align);

    gldata = QGLWidget::convertToGLFormat(image_data);

    NVTX_RangePushA("glDrawPixels");
    glDrawPixels( static_cast<GLsizei>( buffer_width ), static_cast<GLsizei>( buffer_height ),
                  gl_format, gl_data_type, imageData);
    // glDraw
    NVTX_RangePop();

    buffer->unmap();
}
After hours of debugging, I found out that I had forgotten to set the camera parameters correctly; it had nothing to do with the OpenGL stuff.
My U coordinate, the horizontal axis of the view plane, was messed up, but the V, W, and eye coordinates were right.
After I added these lines in initializeGL()
m_camera->setParameters( initial_camera_data.eye,
                         initial_camera_data.lookat,
                         initial_camera_data.up,
                         initial_camera_data.vfov,
                         initial_camera_data.vfov,
                         PinholeCamera::KeepVertical );
everything was right.
I seem to have some problems with getting the position of a physicsBody.
I want to set the position of one physicsBody (bodyA) to the position of another physicsBody (bodyB) when that body collides with a third one.
I tried using _bodyA.position = _bodyB.position; but this doesn't work.
The problem is that the physicsBody I want to get the position of is moving around all the time. So I might need a method that checks its position every frame and, when it collides, returns the CGPoint.
This might be a simple task, but I can't figure it out. Thanks for any help!
I put together a quick sample project for you to take a look at. You can fine-tune the code to fit your exact needs, but it will show you how to do what you asked.
Tap anywhere on the screen to launch the blue square into the red square. Once the two make contact, the green square's position will change to the red square's position.
An alternative to the way I wrote the code is to keep the sprites in an array, but that all depends on how your code is set up.
Side note: you cannot update a sprite's position in the didBeginContact: section, because it will not take effect there. That is why I created the CGPoint to store the position and update object C during the next cycle in update:.
Here is the code:
#import "MyScene.h"
typedef NS_OPTIONS(uint32_t, CNPhysicsCategory)
{
Category1 = 1 << 0,
Category2 = 1 << 1,
Category3 = 1 << 2,
};
#interface MyScene()<SKPhysicsContactDelegate>
#end
#implementation MyScene
{
SKSpriteNode *objectA;
SKSpriteNode *objectB;
SKSpriteNode *objectC;
CGPoint newPositionForObjectC;
}
-(id)initWithSize:(CGSize)size
{
if (self = [super initWithSize:size])
{
self.physicsWorld.contactDelegate = self;
objectA = [SKSpriteNode spriteNodeWithColor:[SKColor redColor] size:CGSizeMake(30, 30)];
objectA.position = CGPointMake(110, 20);
objectA.name = #"objectA";
objectA.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:objectA.size];
objectA.physicsBody.categoryBitMask = Category1;
objectA.physicsBody.collisionBitMask = Category2;
objectA.physicsBody.contactTestBitMask = Category2;
objectA.physicsBody.affectedByGravity = NO;
[self addChild:objectA];
objectB = [SKSpriteNode spriteNodeWithColor:[SKColor blueColor] size:CGSizeMake(30, 30)];
objectB.position = CGPointMake(100, 200);
objectB.name = #"objectB";
objectB.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:objectB.size];
objectB.physicsBody.categoryBitMask = Category2;
objectB.physicsBody.collisionBitMask = Category1;
objectB.physicsBody.contactTestBitMask = Category1;
objectB.physicsBody.affectedByGravity = NO;
[self addChild:objectB];
objectC = [SKSpriteNode spriteNodeWithColor:[SKColor greenColor] size:CGSizeMake(30, 30)];
objectC.position = CGPointMake(250, 250);
objectC.name = #"objectC";
objectC.physicsBody = [SKPhysicsBody bodyWithRectangleOfSize:objectC.size];
objectC.physicsBody.categoryBitMask = Category3;
objectC.physicsBody.collisionBitMask = 0;
objectC.physicsBody.contactTestBitMask = 0;
objectC.physicsBody.affectedByGravity = NO;
[self addChild:objectC];
newPositionForObjectC = CGPointMake(0, 0);
}
return self;
}
- (void)didBeginContact:(SKPhysicsContact *)contact
{
uint32_t collision = (contact.bodyA.categoryBitMask | contact.bodyB.categoryBitMask);
if (collision == (Category1 | Category2))
{
newPositionForObjectC = CGPointMake(contact.bodyA.node.position.x, contact.bodyA.node.position.y);
}
}
-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
[objectA.physicsBody applyImpulse:CGVectorMake(0, 5)];
}
-(void)update:(CFTimeInterval)currentTime
{
if(newPositionForObjectC.x > 0)
{
objectC.position = newPositionForObjectC;
newPositionForObjectC = CGPointMake(0, 0);
}
}
#end