Issue with VkRenderPass load operation for skybox rendering - Intel
I seem to have run into another issue with render passes in Vulkan.
When drawing my scene, I first submit a command buffer that renders the sky (using atmospheric scattering) into a cubemap, which I then sample during my forward pass to draw the sky and sun.
This is the render pass used when drawing the sky and storing it into the cubemap for later sampling:
m_pFrameBuffer = rhi->CreateFrameBuffer();
VkImageView attachment = m_RenderTexture->View();
VkAttachmentDescription attachDesc = CreateAttachmentDescription(
m_RenderTexture->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_CLEAR,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
m_RenderTexture->Samples()
);
VkAttachmentReference colorRef = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
std::array<VkSubpassDependency, 2> dependencies;
dependencies[0] = CreateSubPassDependency(
VK_SUBPASS_EXTERNAL,
VK_ACCESS_MEMORY_READ_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_DEPENDENCY_BY_REGION_BIT
);
dependencies[1] = CreateSubPassDependency(
0,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_SUBPASS_EXTERNAL,
VK_ACCESS_MEMORY_READ_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT
);
VkSubpassDescription subpassDesc = { };
subpassDesc.colorAttachmentCount = 1;
subpassDesc.pColorAttachments = &colorRef;
subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
VkRenderPassCreateInfo renderpassCi = { };
renderpassCi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderpassCi.attachmentCount = 1;
renderpassCi.pAttachments = &attachDesc;
renderpassCi.dependencyCount = static_cast<u32>(dependencies.size());
renderpassCi.pDependencies = dependencies.data();
renderpassCi.subpassCount = 1;
renderpassCi.pSubpasses = &subpassDesc;
VkFramebufferCreateInfo framebufferCi = { };
framebufferCi.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferCi.height = kTextureSize;
framebufferCi.width = kTextureSize;
framebufferCi.attachmentCount = 1;
framebufferCi.layers = 1;
framebufferCi.pAttachments = &attachment;
m_pFrameBuffer->Finalize(framebufferCi, renderpassCi);
After rendering the sky and storing it into the cubemap, I use the following render pass to sample it and draw the sky onto the rendered scene. This pass uses VK_ATTACHMENT_LOAD_OP_LOAD so that the already-rendered scene is not cleared when the skybox is drawn on top of it:
// Create a renderpass for the pbr overlay.
Texture* pbrColor = gResources().GetRenderTexture(PBRColorAttachStr);
Texture* pbrNormal = gResources().GetRenderTexture(PBRNormalAttachStr);
Texture* pbrPosition = gResources().GetRenderTexture(PBRPositionAttachStr);
Texture* pbrRoughMetal = gResources().GetRenderTexture(PBRRoughMetalAttachStr);
Texture* pbrDepth = gResources().GetRenderTexture(PBRDepthAttachStr);
Texture* RTBright = gResources().GetRenderTexture(RenderTargetBrightStr);
std::array<VkAttachmentDescription, 6> attachmentDescriptions;
VkSubpassDependency dependenciesNative[2];
attachmentDescriptions[0] = CreateAttachmentDescription(
pbrColor->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
pbrColor->Samples()
);
attachmentDescriptions[1] = CreateAttachmentDescription(
pbrNormal->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
pbrNormal->Samples()
);
attachmentDescriptions[2] = CreateAttachmentDescription(
RTBright->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
RTBright->Samples()
);
attachmentDescriptions[3] = CreateAttachmentDescription(
pbrPosition->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
pbrPosition->Samples()
);
attachmentDescriptions[4] = CreateAttachmentDescription(
pbrRoughMetal->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
pbrRoughMetal->Samples()
);
attachmentDescriptions[5] = CreateAttachmentDescription(
pbrDepth->Format(),
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
pbrDepth->Samples()
);
dependenciesNative[0] = CreateSubPassDependency(
VK_SUBPASS_EXTERNAL,
VK_ACCESS_MEMORY_READ_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_DEPENDENCY_BY_REGION_BIT
);
dependenciesNative[1] = CreateSubPassDependency(
0,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_SUBPASS_EXTERNAL,
VK_ACCESS_MEMORY_READ_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT
);
std::array<VkAttachmentReference, 5> attachmentColors;
VkAttachmentReference attachmentDepthRef = { static_cast<u32>(attachmentColors.size()), VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL };
attachmentColors[0].attachment = 0;
attachmentColors[0].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentColors[1].attachment = 1;
attachmentColors[1].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentColors[2].attachment = 2;
attachmentColors[2].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentColors[3].attachment = 3;
attachmentColors[3].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentColors[4].attachment = 4;
attachmentColors[4].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = static_cast<u32>(attachmentColors.size());
subpass.pColorAttachments = attachmentColors.data();
subpass.pDepthStencilAttachment = &attachmentDepthRef;
VkRenderPassCreateInfo renderpassCI = CreateRenderPassInfo(
static_cast<u32>(attachmentDescriptions.size()),
attachmentDescriptions.data(),
2,
dependenciesNative,
1,
&subpass
);
VkResult result =
vkCreateRenderPass(rhi->LogicDevice()->Native(), &renderpassCI, nullptr, &m_SkyboxRenderPass);
This is the command buffer that renders the sky onto my scene. I submit it after rendering the scene to take advantage of early-z rejection:
if (m_pSkyboxCmdBuffer) {
m_pRhi->DeviceWaitIdle();
m_pSkyboxCmdBuffer->Reset(VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
}
VkCommandBufferBeginInfo beginInfo = { };
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
CommandBuffer* buf = m_pSkyboxCmdBuffer;
FrameBuffer* skyFrameBuffer = gResources().GetFrameBuffer(PBRFrameBufferStr);
GraphicsPipeline* skyPipeline = gResources().GetGraphicsPipeline(SkyboxPipelineStr);
DescriptorSet* global = m_pGlobal->Set();
DescriptorSet* skybox = gResources().GetDescriptorSet(SkyboxDescriptorSetStr);
VkDescriptorSet descriptorSets[] = {
global->Handle(),
skybox->Handle()
};
buf->Begin(beginInfo);
std::array<VkClearValue, 6> clearValues;
clearValues[0].color = { 0.0f, 0.0f, 0.0f, 1.0f };
clearValues[1].color = { 0.0f, 0.0f, 0.0f, 1.0f };
clearValues[2].color = { 0.0f, 0.0f, 0.0f, 1.0f };
clearValues[3].color = { 0.0f, 0.0f, 0.0f, 1.0f };
clearValues[4].color = { 0.0f, 0.0f, 0.0f, 1.0f };
clearValues[5].depthStencil = { 1.0f, 0 };
VkViewport viewport = {};
viewport.height = (r32)m_pWindow->Height();
viewport.width = (r32)m_pWindow->Width();
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
viewport.y = 0.0f;
viewport.x = 0.0f;
VkRenderPassBeginInfo renderBegin = { };
renderBegin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderBegin.framebuffer = skyFrameBuffer->Handle();
renderBegin.renderPass = m_pSky->GetSkyboxRenderPass();
renderBegin.clearValueCount = static_cast<u32>(clearValues.size());
renderBegin.pClearValues = clearValues.data();
renderBegin.renderArea.offset = { 0, 0 };
renderBegin.renderArea.extent = m_pRhi->SwapchainObject()->SwapchainExtent();
// Start the renderpass.
buf->BeginRenderPass(renderBegin, VK_SUBPASS_CONTENTS_INLINE);
buf->SetViewPorts(0, 1, &viewport);
buf->BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, skyPipeline->Pipeline());
buf->BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, skyPipeline->Layout(), 0, 2, descriptorSets, 0, nullptr);
VertexBuffer* vertexbuffer = m_pSky->GetSkyboxVertexBuffer();
IndexBuffer* idxBuffer = m_pSky->GetSkyboxIndexBuffer();
VkDeviceSize offsets[] = { 0 };
VkBuffer vert = vertexbuffer->Handle()->NativeBuffer();
VkBuffer ind = idxBuffer->Handle()->NativeBuffer();
buf->BindVertexBuffers(0, 1, &vert, offsets);
buf->BindIndexBuffer(ind, 0, VK_INDEX_TYPE_UINT32);
buf->DrawIndexed(idxBuffer->IndexCount(), 1, 0, 0, 0);
buf->EndRenderPass();
buf->End();
Finally, I submit it inside my rendering function:
// TODO(): Need to clean this up.
VkCommandBuffer offscreenCmd = m_Offscreen._CmdBuffers[m_Offscreen._CurrCmdBufferIndex]->Handle();
VkCommandBuffer skyBuffers[] = { m_Offscreen._CmdBuffers[m_Offscreen._CurrCmdBufferIndex]->Handle(), m_pSky->CmdBuffer()->Handle() };
VkSemaphore skyWaits[] = { m_Offscreen._Semaphore->Handle(), m_pSky->SignalSemaphore()->Handle() };
VkSemaphore waitSemas[] = { m_pRhi->SwapchainObject()->ImageAvailableSemaphore() };
VkSemaphore signalSemas[] = { m_Offscreen._Semaphore->Handle() };
VkSemaphore shadowSignal[] = { m_Offscreen._ShadowSema->Handle() };
VkPipelineStageFlags waitFlags[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT };
VkSubmitInfo offscreenSI = {};
offscreenSI.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
offscreenSI.pCommandBuffers = &offscreenCmd;
offscreenSI.commandBufferCount = 1;
offscreenSI.signalSemaphoreCount = 1;
offscreenSI.pSignalSemaphores = signalSemas;
offscreenSI.waitSemaphoreCount = 1;
offscreenSI.pWaitSemaphores = waitSemas;
offscreenSI.pWaitDstStageMask = waitFlags;
VkSubmitInfo skyboxSI = offscreenSI;
VkSemaphore skyboxWaits[] = { m_Offscreen._Semaphore->Handle() };
VkSemaphore skyboxSignal[] = { m_SkyboxFinished->Handle() };
VkCommandBuffer skyboxCmd = m_pSkyboxCmdBuffer->Handle();
skyboxSI.commandBufferCount = 1;
skyboxSI.pCommandBuffers = &skyboxCmd;
skyboxSI.pSignalSemaphores = skyboxSignal;
skyboxSI.pWaitSemaphores = skyboxWaits;
VkSubmitInfo hdrSI = offscreenSI;
VkSemaphore hdrWaits[] = { m_SkyboxFinished->Handle() };
VkSemaphore hdrSignal[] = { m_HDR._Semaphore->Handle() };
VkCommandBuffer hdrCmd = m_HDR._CmdBuffers[m_HDR._CurrCmdBufferIndex]->Handle();
hdrSI.pCommandBuffers = &hdrCmd;
hdrSI.pSignalSemaphores = hdrSignal;
hdrSI.pWaitSemaphores = hdrWaits;
VkSemaphore waitSemaphores = m_HDR._Semaphore->Handle();
if (!m_HDR._Enabled) waitSemaphores = m_Offscreen._Semaphore->Handle();
// Update materials before rendering the frame.
UpdateMaterials();
// begin frame. This is where we start our render process per frame.
BeginFrame();
while (m_Offscreen._CmdBuffers[m_HDR._CurrCmdBufferIndex]->Recording() || !m_pRhi->CmdBuffersComplete()) {}
// Render shadow map here. Primary shadow map is our concern.
if (m_pLights->PrimaryShadowEnabled()) {
VkCommandBuffer shadowbuf[] = { m_Offscreen._ShadowCmdBuffers[m_Offscreen._CurrCmdBufferIndex]->Handle() };
VkSubmitInfo shadowSubmit = { };
shadowSubmit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
shadowSubmit.pCommandBuffers = shadowbuf;
shadowSubmit.commandBufferCount = 1;
shadowSubmit.signalSemaphoreCount = 1;
shadowSubmit.waitSemaphoreCount = 1;
shadowSubmit.pWaitSemaphores = waitSemas;
shadowSubmit.pSignalSemaphores = shadowSignal;
shadowSubmit.pWaitDstStageMask = waitFlags;
// Submit shadow rendering.
m_pRhi->GraphicsSubmit(shadowSubmit);
offscreenSI.pWaitSemaphores = shadowSignal;
}
// Check if sky needs to update it's cubemap.
if (m_pSky->NeedsRendering()) {
skyboxSI.waitSemaphoreCount = 2;
skyboxSI.pWaitSemaphores = skyWaits;
offscreenSI.commandBufferCount = 2;
offscreenSI.signalSemaphoreCount = 2;
offscreenSI.pSignalSemaphores = skyWaits;
offscreenSI.pCommandBuffers = skyBuffers;
m_pSky->MarkClean();
}
// Offscreen PBR Forward Rendering Pass.
m_pRhi->GraphicsSubmit(offscreenSI);
// Render Sky onto our render textures.
m_pRhi->GraphicsSubmit(skyboxSI);
// High Dynamic Range and Gamma Pass.
if (m_HDR._Enabled) m_pRhi->GraphicsSubmit(hdrSI);
// Before calling this cmd buffer, we want to submit our offscreen buffer first, then
// sent our signal to our swapchain cmd buffers.
// TODO(): We want to hold off on signalling GraphicsFinished Semaphore, and instead
// have it signal the SignalUI semaphore instead. UI Overlay will be the one to use
// GraphicsFinished Semaphore to signal end of frame rendering.
VkSemaphore signal = m_pRhi->GraphicsFinishedSemaphore();
VkSemaphore uiSig = m_pUI->Signal()->Handle();
m_pRhi->SubmitCurrSwapchainCmdBuffer(1, &waitSemaphores, 1, &signal);
// Render the Overlay.
RenderOverlay();
EndFrame();
On an Nvidia GTX 870M, the results look as expected.
However, on Intel HD Graphics 620 I get the result in this screenshot, which is too large to embed here: https://github.com/CheezBoiger/Recluse-Game/blob/master/Regression/Shaders/ForwardPass.png
It looks as though the scene from previous frames is left uncleared in the color attachment, as if the pass were rendering to a separate surface and reusing it, even though the attachment should be cleared at the start of every frame...
If I replace VK_ATTACHMENT_LOAD_OP_LOAD with VK_ATTACHMENT_LOAD_OP_CLEAR, the stale image goes away, but then only the skybox is rendered... Is my render pass missing something that Intel hardware requires, or am I going about drawing the skybox onto my rendered scene the wrong way entirely?
Any help is much appreciated.
* Update *
Problem fixed, thanks to the solution from @Ekzuzy below.
Final image on Intel hardware after the fix:
You always provide UNDEFINED as the initial layout in all your render passes, for all attachments. A layout transition from the UNDEFINED layout to any other layout does not guarantee that the image's contents are preserved. So if you create a render pass with LOAD as the load op, you need to provide the actual layout the image has just before the render pass starts. The same applies to other layout transitions as well (those performed through memory barriers).
As for clears, some images should be cleared at the beginning of a frame or render pass. For those you can leave UNDEFINED as the initial layout, but you should change the load op to CLEAR.
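To make this concrete, here is a minimal sketch of the two valid combinations described above, written with raw VkAttachmentDescription structs rather than the CreateAttachmentDescription helper from the question (the helper name below, the format, the sample count, and the layouts are illustrative assumptions, not the engine's actual values):
#include <vulkan/vulkan.h>
// Hypothetical helper (not the engine's code): fills out the two valid
// combinations of initialLayout and loadOp described above.
void DescribePbrColorAttachments(VkAttachmentDescription& preservedColor,
                                 VkAttachmentDescription& clearedColor)
{
  // 1) An attachment whose previous contents must survive into this pass
  //    (e.g. the PBR color target the skybox is composited onto).
  //    loadOp = LOAD requires an initialLayout matching the layout the image
  //    actually has when the pass begins (assumed here to be the finalLayout
  //    of the pass that produced it).
  preservedColor = {};
  preservedColor.format         = VK_FORMAT_R16G16B16A16_SFLOAT;            // assumed format
  preservedColor.samples        = VK_SAMPLE_COUNT_1_BIT;                    // assumed sample count
  preservedColor.loadOp         = VK_ATTACHMENT_LOAD_OP_LOAD;
  preservedColor.storeOp        = VK_ATTACHMENT_STORE_OP_STORE;
  preservedColor.stencilLoadOp  = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
  preservedColor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
  preservedColor.initialLayout  = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // not UNDEFINED
  preservedColor.finalLayout    = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
  // 2) An attachment that is cleared at the start of the pass: UNDEFINED is
  //    fine here, because the old contents are discarded anyway.
  clearedColor = preservedColor;
  clearedColor.loadOp        = VK_ATTACHMENT_LOAD_OP_CLEAR;
  clearedColor.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
}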
As to why this works on Nvidia but doesn't work on Intel: layout transitions don't have any real effect on Nvidia's hardware, but they do matter on Intel's platforms (and on AMD's too). So skipping layout transitions, or setting improper ones, will usually still appear to work on Nvidia even though it violates the specification. But don't do that just because it happens to work; such an approach is invalid, and future platforms, even from the same vendor, may behave differently.
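The same rule applies to explicit layout transitions performed with barriers: oldLayout must be the layout the image actually has, not UNDEFINED, whenever the contents need to be preserved. A hedged sketch of such a transition follows (a hypothetical helper with placeholder stages and handles, not code from the engine above):
#include <vulkan/vulkan.h>
// Hypothetical: transition a color attachment so it can be sampled, while
// keeping its contents. Passing VK_IMAGE_LAYOUT_UNDEFINED as oldLayout here
// would permit the driver to discard the existing data.
void TransitionColorToSampled(VkCommandBuffer cmd, VkImage image)
{
  VkImageMemoryBarrier barrier = {};
  barrier.sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.oldLayout           = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // the layout the image really has
  barrier.newLayout           = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.image               = image;
  barrier.srcAccessMask       = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
  barrier.dstAccessMask       = VK_ACCESS_SHADER_READ_BIT;
  barrier.subresourceRange    = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
  vkCmdPipelineBarrier(cmd,
                       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                       VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                       0, 0, nullptr, 0, nullptr, 1, &barrier);
}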