Unable to access the geometry of points in a point cloud - A-Frame and three.js

I have an A-Frame entity with a component that generates a point cloud of THREE.Points in its init function, but I am unable to access the points' geometry.
I had a demo working using plain three.js:
for ( let i = 0; i < mainContainer.children.length; i ++ ) {
  const object = mainContainer.children[ i ];
  if ( object instanceof THREE.Points ) {
    // ... work with object.geometry here
  }
}
I have set the point cloud on the entity:
el.setObject3D('pointCloud', new THREE.Points( this.geometry, this.pointMaterial ));
I cannot access the geometry in the component's update function, either via el.getObject3D('pointCloud') or via the id of the entity.

Since you set the object with setObject3D('pointCloud', new THREE.Points(...)), calling getObject3D('pointCloud') retrieves that THREE.Points instance.
The geometry is a property of the THREE.Points object:
<script src="https://aframe.io/releases/1.4.0/aframe.min.js"></script>
<script>
AFRAME.registerComponent("cloud", {
init: function() {
// create the points
const vertices = [];
for (let i = 0; i < 500; i++) {
const x = THREE.MathUtils.randFloatSpread(5);
const y = THREE.MathUtils.randFloatSpread(5);
const z = THREE.MathUtils.randFloatSpread(5);
vertices.push(x, y, z);
}
// create a geometry from the points
const geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.Float32BufferAttribute(vertices, 3));
// small grey points material
const material = new THREE.PointsMaterial({ color: 0x888888, size: 0.05 });
// setObject3D sets the points under the name 'cloud'
this.el.setObject3D("cloud", new THREE.Points(geometry, material))
},
update: function() {
// retrieve the THREE.Points object
const points = this.el.getObject3D("cloud");
// get it's geometry attribute
const geometry = points.geometry;
// log it
console.log(geometry)
}
})
</script>
<a-scene>
<a-entity position="0 1 -4" cloud></a-entity>
</a-scene>
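If you need the geometry from outside the component, you can also query the entity and read the same named Object3D. A minimal sketch, assuming the entity has a hypothetical id of my-cloud and the component's init has already run:
// <a-entity id="my-cloud" position="0 1 -4" cloud></a-entity>
const entityEl = document.querySelector('#my-cloud');
const points = entityEl.getObject3D('cloud');           // the THREE.Points instance set in init
const position = points.geometry.attributes.position;   // BufferAttribute holding the vertices
console.log(position.count, position.array);
// after editing position.array in place, flag the attribute so three.js re-uploads it
position.needsUpdate = true;
Note that getObject3D returns undefined before init has run, so defer this until the entity has loaded (or listen for the object3dset event).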

Related

HERE map - add points on polyline to manipulate routing

I have to create routing with HERE Maps with the possibility of manipulating it (e.g. dragging points on the polyline, moving them somewhere else, adding the next checkpoint there, and recalculating the route). I have no problem clicking on the polyline and getting the coordinates of the new checkpoint, and I know, more or less, how to add drag and drop to it. But however I try, I can't add a new point to the polyline.
I am trying to get something like this: https://wego.here.com/directions/drive/ulica-Kolejarzy,-44-102-Gliwice,-Poland:loc-dmVyc2lvbj0xO3RpdGxlPXVsaWNhK0tvbGVqYXJ6eTtsYW5nPXBsO2xhdD01MC4zMDYwNDtsb249MTguNjkwNztzdHJlZXQ9dWxpY2ErS29sZWphcnp5O2NpdHk9R2xpd2ljZTtwb3N0YWxDb2RlPTQ0LTEwMjtjb3VudHJ5PVBPTDtkaXN0cmljdD1HbGl3aWNlO3N0YXRlPVdvai4rJUM1JTlBbCVDNCU4NXNraWU7Y291bnR5PUdsaXdpY2U7Y2F0ZWdvcnlJZD1zdHJlZXQtc3F1YXJlO3NvdXJjZVN5c3RlbT1pbnRlcm5hbDtwZHNDYXRlZ29yeUlkPTkwMC05NDAwLTA0MDE/ulica-Mostowa,-43-600-Jaworzno,-Poland:loc-dmVyc2lvbj0xO3RpdGxlPXVsaWNhK01vc3Rvd2E7bGFuZz1wbDtsYXQ9NTAuMjA3Njk7bG9uPTE5LjI1NTc0O3N0cmVldD11bGljYStNb3N0b3dhO2NpdHk9SmF3b3J6bm87cG9zdGFsQ29kZT00My02MDA7Y291bnRyeT1QT0w7ZGlzdHJpY3Q9SmF3b3J6bm87c3RhdGU9V29qLislQzUlOUFsJUM0JTg1c2tpZTtjb3VudHk9SmF3b3J6bm87Y2F0ZWdvcnlJZD1zdHJlZXQtc3F1YXJlO3NvdXJjZVN5c3RlbT1pbnRlcm5hbDtwZHNDYXRlZ29yeUlkPTkwMC05NDAwLTA0MDE?map=50.25857,18.97158,11,normal . You know, where you can drag and drop the polyline and add more checkpoints.
Below I put my code with some comments. In the method "manipulatePolylineOnClick" I get the new coordinates from the click and want to add them to the polyline, but I really don't know how.
export function createRoute(origin, destination, via, platform, map) {
let routingParameters = {
'routingMode': 'fast',
'transportMode': 'truck',
'return': 'polyline'
};
setRoutingParameters(routingParameters, origin, destination, via)
var router = platform.getRoutingService(null, 8);
var onResult = function (result) {
if (result.routes.length) {
result.routes[0].sections.forEach((section) => {
//create polyline
let linestring = H.geo.LineString.fromFlexiblePolyline(section.polyline);
let routeLine = new H.map.Polyline(linestring, {
style: {strokeColor: 'red', lineWidth: 3}
});
// needed for polyline drag and drop
let verticeGroup = new H.map.Group({
visibility: false
});
let mainGroup = new H.map.Group({
volatility: true, // mark the group as volatile for smooth dragging of all its objects
objects: [routeLine, verticeGroup]
});
let polylineTimeout;
routeLine.draggable = true;
// my method that should add points to manipulate the route, and allow doing so
manipulatePolylineOnClick(routeLine, map, verticeGroup)
let startMarker = new H.map.Marker(section.departure.place.location);
let endMarker = new H.map.Marker(section.arrival.place.location);
map.addObjects([routeLine, startMarker, endMarker]);
map.getViewModel().setLookAtData({bounds: routeLine.getBoundingBox()});
});
}
};
router.calculateRoute(routingParameters, onResult,
function(error) {
alert(error.message);
});
}
function setRoutingParameters(routingParameters, origin, destination, via) {
routingParameters['origin'] = origin
routingParameters['destination'] = destination
}
function manipulatePolylineOnClick(polyline, map, verticeGroup) {
const svgCircle = '<svg width="20" height="20" version="1.1" xmlns="http://www.w3.org/2000/svg">' +
'<circle cx="10" cy="10" r="7" fill="transparent" stroke="red" stroke-width="4"/>' +
'</svg>';
// so, I have the new coordinates from the click in the coord variable. But what do I have to do to add them to the polyline?
polyline.addEventListener('tap', function (e) {
let coord = map.screenToGeo(e.currentPointer.viewportX, e.currentPointer.viewportY);
//it's not working
let vertice = new H.map.Marker(
coord,
{
icon: new H.map.Icon(svgCircle, {anchor: {x: 20, y: 20}})
}
);
vertice.draggable = true;
// vertice.setData({'verticeIndex': index})
verticeGroup.addObject(vertice);
console.log('coord', coord)
})
}
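For what it's worth, one way to actually add the tapped coordinate to the route line is to rebuild the polyline's LineString with the new vertex spliced into its nearest segment. This is only a rough sketch under some assumptions: HERE Maps JS API 3.1 (where H.map.Polyline exposes getGeometry()/setGeometry() and H.geo.LineString exposes getLatLngAltArray() and a constructor taking a flat [lat, lng, alt, ...] array), and insertPointIntoPolyline is a hypothetical helper, not part of the question's code:
function insertPointIntoPolyline(polyline, coord) {
  // flat [lat, lng, alt, lat, lng, alt, ...] array of the current geometry
  const latLngAlts = polyline.getGeometry().getLatLngAltArray().slice();
  // find the segment whose midpoint is closest to the tapped coordinate (rough heuristic)
  let insertAt = 3;
  let bestDist = Infinity;
  for (let i = 0; i + 5 < latLngAlts.length; i += 3) {
    const midLat = (latLngAlts[i] + latLngAlts[i + 3]) / 2;
    const midLng = (latLngAlts[i + 1] + latLngAlts[i + 4]) / 2;
    const dist = Math.pow(midLat - coord.lat, 2) + Math.pow(midLng - coord.lng, 2);
    if (dist < bestDist) {
      bestDist = dist;
      insertAt = i + 3; // insert between the two endpoints of this segment
    }
  }
  // splice the new vertex in and rebuild the LineString
  latLngAlts.splice(insertAt, 0, coord.lat, coord.lng, 0);
  polyline.setGeometry(new H.geo.LineString(latLngAlts));
}
The tap handler above could then call insertPointIntoPolyline(polyline, coord) right after adding the vertice marker, and afterwards recalculate the route with the new coordinate as a via point.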

addRow styling in Label

I am adding a custom result table like below:
return builder
  .addRow(name)
  .addRow(price)
Is there any way to style a row? I want to display the price with a bigger font and a different color.
Thank you.
Here is an example of a simple custom AutoCursor:
// Extract required parts from LightningChartJS.
const {
lightningChart,
AutoCursorModes,
UIElementBuilders,
UILayoutBuilders,
UIBackgrounds,
ColorHEX,
SolidFill,
SolidLine,
UIOrigins,
translatePoint,
} = lcjs;
// Import data-generator from 'xydata'-library.
const {
createProgressiveTraceGenerator
} = xydata
// colors of the series
const colors = ["#fc03f0", "#1cb843", "#eeff00", "#0955ff", "#500c3f"];
// Create a XY Chart.
const chart = lightningChart()
.ChartXY({
// theme: Themes.dark
})
// Disable native AutoCursor to create custom
.setAutoCursorMode(AutoCursorModes.disabled)
.setTitle("Custom Cursor using LCJS UI");
// set title for Y axis
chart.getDefaultAxisY().setTitle("Y-axis");
// generate data and create the series
const series = chart.addLineSeries().setStrokeStyle(
new SolidLine({
fillStyle: new SolidFill({ color: ColorHEX(colors[0]) }),
thickness: 2,
})
);
createProgressiveTraceGenerator()
.setNumberOfPoints(200)
.generate()
.toPromise()
.then((data) => {
return series.add(data);
});
// Create UI elements for custom cursor.
const resultTable = chart
.addUIElement(
UILayoutBuilders.Column.setBackground(UIBackgrounds.Rectangle),
{
x: chart.getDefaultAxisX(),
y: chart.getDefaultAxisY(),
}
)
.setMouseInteractions(false)
.setOrigin(UIOrigins.LeftBottom)
.setMargin(2)
.setBackground((background) =>
background.setStrokeStyle(
new SolidLine({
thickness: 1,
fillStyle: new SolidFill({ color: ColorHEX("#c9bab9") }),
})
)
);
const rowX = resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox)
.setTextFont((font) => font.setSize(15))
.setTextFillStyle(new SolidFill({ color: ColorHEX(colors[1]) }));
const rowY = resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox)
.setTextFont((font) => font.setSize(15))
.setTextFillStyle(new SolidFill({ color: ColorHEX(colors[2]) }));
// Hide custom cursor components initially.
resultTable.dispose();
// Implement custom cursor logic with events.
chart.onSeriesBackgroundMouseMove((_, event) => {
const mouseLocationClient = { x: event.clientX, y: event.clientY };
// Translate mouse location to LCJS coordinate system for solving data points from series, and translating to Axes.
const mouseLocationEngine = chart.engine.clientLocation2Engine(
mouseLocationClient.x,
mouseLocationClient.y
);
// Translate mouse location to Axis.
const mouseLocationAxis = translatePoint(
mouseLocationEngine,
chart.engine.scale,
series.scale
);
// Solve nearest data point to the mouse on each series.
const nearestDataPoints = series.solveNearestFromScreen(mouseLocationEngine)
if (nearestDataPoints) {
// Set custom cursor location.
resultTable.setPosition({
x: nearestDataPoints.location.x,
y: nearestDataPoints.location.y,
});
// set Origin of resultTable
if ( nearestDataPoints.location.x > chart.getDefaultAxisX().getInterval().end / 1.5 ) {
if (nearestDataPoints.location.y >chart.getDefaultAxisY().getInterval().end / 1.5) {
resultTable.setOrigin(UIOrigins.RightTop);
} else {
resultTable.setOrigin(UIOrigins.RightBottom);
}
} else if ( nearestDataPoints.location.y > chart.getDefaultAxisY().getInterval().end / 1.5) {
resultTable.setOrigin(UIOrigins.LeftTop);
} else {
resultTable.setOrigin(UIOrigins.LeftBottom);
}
// Format result table text.
rowX.setText(`X: ${nearestDataPoints.location.x.toFixed(1)}`);
rowY.setText(`Y: ${nearestDataPoints.location.y.toFixed(1)}`)
// Display cursor.
resultTable.restore();
} else {
// Hide cursor.
resultTable.dispose();
}
});
chart.onSeriesBackgroundMouseLeave((_, e) => {
resultTable.dispose();
});
<script src="https://unpkg.com/#arction/xydata#1.4.0/dist/xydata.iife.js"></script>
<script src="https://unpkg.com/#arction/lcjs#3.0.0/dist/lcjs.iife.js"></script>
There is no existing built-in method for customizing each row/column of the result table apart from the text content.
This is definitely a feature that we will eventually include in the library, as soon as it becomes a priority or a customer requests it.
Right now, as a community user, you can do it by creating a custom cursor. Unfortunately there aren't many examples on this subject.
The general idea is to solve the nearest data point from the mouse location with ChartXY.solveNearest (or to calculate it with some custom method), and to show the custom cursor using UI elements and custom ticks if desired.
The ResultTable can be created by combining Column and Row layouts to get a grid, and then adding TextBox elements, whose font/color you can style individually.
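For the original question about making the price row stand out, each TextBox added to the grid can be styled on its own, so the price row can simply get a larger font and a different fill color than the name row. A small sketch in the same style as the cursor code above (rowName / rowPrice and the concrete sizes/colors are only illustrative):
// each row is its own TextBox, so it can be styled independently
const rowName = resultTable
  .addElement(UILayoutBuilders.Row)
  .addElement(UIElementBuilders.TextBox)
  .setTextFont((font) => font.setSize(14))
  .setTextFillStyle(new SolidFill({ color: ColorHEX("#ffffff") }));
const rowPrice = resultTable
  .addElement(UILayoutBuilders.Row)
  .addElement(UIElementBuilders.TextBox)
  .setTextFont((font) => font.setSize(22)) // bigger font for the price
  .setTextFillStyle(new SolidFill({ color: ColorHEX("#fc030b") })); // different color
rowName.setText("Name: Widget");
rowPrice.setText("Price: 42.00");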
EDIT: The official examples for custom cursors have been released.
You can find them here: https://www.arction.com/lightningchart-js-interactive-examples/search.html?t=cursor
There is one example with LCJS UI elements and another with dynamic HTML & CSS (the same approach could be used with an external UI framework).

Build a-entity hierarchy according to glTF model hierarchy

I'm testing some glTF parser modifications in order to build an a-entity hierarchy matching the 3D object hierarchy of the glTF scene.
It works well, but then I can't change the position/orientation/scale of the a-entity children from the A-Frame Inspector widgets. Only the root a-entity can be moved.
Would you know what is missing?
GLTFParser.prototype.loadScenes = function () {
var json = this.json;
var extensions = this.extensions;
// scene node hierarchy builder
// Geff
function buildNodeHierachy( nodeId, parentObject, allNodes ) {
var _node = allNodes[ nodeId ];
parentObject.add( _node );
if ( _node.type == 'Object3D' && _node != undefined) {
var entityChild = AFRAME.INSPECTOR.createNewEntity({element: 'a-entity', components: {}});
entityChild.setAttribute('visible', _node.visible);
entityChild.setAttribute('position', _node.position);
entityChild.setAttribute('rotation', _node.rotation);
entityChild.setAttribute('scale', _node.scale);
if(parentObject.type == 'Scene')parentObject.sceneRoot = _node;
entityChild.setObject3D('mesh', _node);
entityChild.id = _node.name;
//}
if(parentObject.el != undefined){
parentObject.el.insertBefore(entityChild, null);
parentObject.el.emit('child-attached', entityChild);
}
_node.parent = parentObject;
entityChild.emit('object3dset', {object: _node, type: 'mesh'});
entityChild.emit('model-loaded', {format: 'gltf', model: _node});
}
var node = json.nodes[ nodeId ];
if ( node.children ) {
var children = node.children;
for ( var i = 0, l = children.length; i < l; i ++ ) {
var child = children[ i ];
buildNodeHierachy( child, _node, allNodes );
}
}
}
return this._withDependencies
The Inspector should not be involved at all in the code. To create an entity:
document.createElement('a-entity');
https://aframe.io/docs/0.6.0/introduction/javascript-events-dom-apis.html#creating-an-entity-with-createelement
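For example, each glTF node could be wrapped in a child entity using plain DOM APIs and standard A-Frame attributes instead of AFRAME.INSPECTOR.createNewEntity. A minimal sketch, assuming parentEl is the parent a-entity (or the scene) and node is the THREE.Object3D coming out of the parser; attachNodeAsEntity is a hypothetical helper:
function attachNodeAsEntity(parentEl, node) {
  // create a plain a-entity instead of going through the Inspector
  const childEl = document.createElement('a-entity');
  childEl.setAttribute('id', node.name);
  childEl.setAttribute('visible', node.visible);
  childEl.setAttribute('position', node.position);
  // A-Frame rotation attributes are in degrees, THREE.Euler angles are in radians
  childEl.setAttribute('rotation', {
    x: THREE.MathUtils.radToDeg(node.rotation.x),
    y: THREE.MathUtils.radToDeg(node.rotation.y),
    z: THREE.MathUtils.radToDeg(node.rotation.z)
  });
  childEl.setAttribute('scale', node.scale);
  // attach the mesh once the entity has initialized
  childEl.addEventListener('loaded', () => {
    childEl.setObject3D('mesh', node);
  });
  parentEl.appendChild(childEl);
  return childEl;
}
Because the children are regular entities created through the DOM, the Inspector can then move, rotate, and scale them like any other entity.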

Rendering from two cameras at the same time in A-Frame

The recent v0.3.0 blog post mentions that WebVR 1.0 support allows "us to have different content on the desktop display than the headset, opening the door for asynchronous gameplay and spectator modes." This is precisely what I'm trying to get working. I'm looking to have one camera in the scene represent the viewpoint of the HMD and a secondary camera represent a spectator of the same scene, rendering that view to a canvas on the same webpage. 0.3.0 removes the ability to render a-scene to a specific canvas in favor of the embedded component. Any thoughts on how to accomplish two cameras rendering a single scene simultaneously?
My intention is to have the desktop display show what a user is doing from a different perspective. My end goal is to be able to build a mixed-reality green-screen component.
While there may be a better or cleaner way to do this in the future, I was able to get a second camera rendering by looking at examples of how this is done in the THREE.js world.
I add a component called spectator to a non-active camera. In its init function I set up a new renderer and attach it to a div outside the scene, creating a new canvas. I then call the render method inside the tick() part of the component lifecycle.
I have not worked out how to isolate the movement of this camera yet. The default look-controls of the 0.3.0 A-Frame scene still control both cameras.
Source code:
https://gist.github.com/derickson/334a48eb1f53f6891c59a2c137c180fa
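A rough sketch of that approach (not the gist itself, just the idea; it assumes a <div id="spectatorDiv"> exists outside the scene and the component is attached to an entity that also has a camera component):
AFRAME.registerComponent('spectator', {
  init: function () {
    // second renderer with its own canvas, attached outside the a-scene
    this.renderer = new THREE.WebGLRenderer({ antialias: true });
    this.renderer.setSize(640, 360);
    document.querySelector('#spectatorDiv').appendChild(this.renderer.domElement);
  },
  tick: function () {
    // render the same scene from this entity's camera every frame
    const camera = this.el.getObject3D('camera');
    if (camera) {
      this.renderer.render(this.el.sceneEl.object3D, camera);
    }
  }
});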
I've created a set of components that can help with this. https://github.com/diarmidmackenzie/aframe-multi-camera
Here's an example showing usage with A-Frame 1.2.0 to display the main camera on the left half of the screen, and a secondary camera on the right half.
<!DOCTYPE html>
<html>
<head>
<script src="https://aframe.io/releases/1.2.0/aframe.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/diarmidmackenzie/aframe-multi-camera#latest/src/multi-camera.min.js"></script>
</head>
<body>
<div>
<a-scene>
<a-entity camera look-controls wasd-controls position="0 1.6 0">
<!-- first secondary camera is a child of the main camera, so that it always has the same position / rotation -->
<!-- replace main camera (since main camera is rendered across the whole screen, which we don't want) -->
<a-entity
id="camera1"
secondary-camera="outputElement:#viewport1;sequence: replace"
>
</a-entity>
</a-entity>
<!-- PUT YOUR SCENE CONTENT HERE-->
<!-- position of 2nd secondary camera-->
<a-entity
id="camera2"
secondary-camera="outputElement:#viewport2"
position="8 1.6 -6"
rotation="0 90 0"
>
</a-entity>
</a-scene>
</div>
<!-- standard HTML to control the layout of the two viewports -->
<div style="width: 100%; height:100%; display: flex">
<div id="viewport1" style="width: 50%; height:100%"></div>
<div id="viewport2" style="width: 50%; height:100%"></div>
</div>
</body>
</html>
Also here as a glitch: https://glitch.com/edit/#!/recondite-polar-hyssop
It's also been suggested that I post the entire source code for the multi-camera component here.
Here it is...
/* System that supports capture of the main A-Frame render() call
by add-render-call */
AFRAME.registerSystem('add-render-call', {
init() {
this.render = this.render.bind(this);
this.originalRender = this.el.sceneEl.renderer.render;
this.el.sceneEl.renderer.render = this.render;
this.el.sceneEl.renderer.autoClear = false;
this.preRenderCalls = [];
this.postRenderCalls = [];
this.suppresssDefaultRenderCount = 0;
},
addPreRenderCall(render) {
this.preRenderCalls.push(render)
},
removePreRenderCall(render) {
const index = this.preRenderCalls.indexOf(render);
if (index > -1) {
this.preRenderCalls.splice(index, 1);
}
},
addPostRenderCall(render) {
this.postRenderCalls.push(render)
},
removePostRenderCall(render) {
const index = this.postRenderCalls.indexOf(render);
if (index > -1) {
this.postRenderCalls.splice(index, 1);
}
else {
console.warn("Unexpected failure to remove render call")
}
},
suppressOriginalRender() {
this.suppresssDefaultRenderCount++;
},
unsuppressOriginalRender() {
this.suppresssDefaultRenderCount--;
if (this.suppresssDefaultRenderCount < 0) {
console.warn("Unexpected unsuppression of original render")
this.suppresssDefaultRenderCount = 0;
}
},
render(scene, camera) {
const renderer = this.el.sceneEl.renderer;
// set up THREE.js stats to correctly count across all render calls.
renderer.info.autoReset = false;
renderer.info.reset();
this.preRenderCalls.forEach((f) => f());
if (this.suppresssDefaultRenderCount <= 0) {
this.originalRender.call(renderer, scene, camera)
}
this.postRenderCalls.forEach((f) => f());
}
});
/* Component that captures the main A-Frame render() call
and adds an additional render call.
Must specify an entity and component that expose a function call render(). */
AFRAME.registerComponent('add-render-call', {
multiple: true,
schema: {
entity: {type: 'selector'},
componentName: {type: 'string'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'}
},
init() {
this.invokeRender = this.invokeRender.bind(this);
},
update(oldData) {
// first clean up any old settings.
this.removeSettings(oldData)
// now add new settings.
if (this.data.sequence === "before") {
this.system.addPreRenderCall(this.invokeRender)
}
if (this.data.sequence === "replace") {
this.system.suppressOriginalRender()
}
if (this.data.sequence === "after" ||
this.data.sequence === "replace")
{
this.system.addPostRenderCall(this.invokeRender)
}
},
remove() {
this.removeSettings(this.data)
},
removeSettings(data) {
if (data.sequence === "before") {
this.system.removePreRenderCall(this.invokeRender)
}
if (data.sequence === "replace") {
this.system.unsuppressOriginalRender()
}
if (data.sequence === "after" ||
data.sequence === "replace")
{
this.system.removePostRenderCall(this.invokeRender)
}
},
invokeRender()
{
const componentName = this.data.componentName;
if ((this.data.entity) &&
(this.data.entity.components[componentName])) {
this.data.entity.components[componentName].render(this.el.sceneEl.renderer, this.system.originalRender);
}
}
});
/* Component to set layers via HTML attribute. */
AFRAME.registerComponent('layers', {
schema : {type: 'number', default: 0},
init: function() {
const setObjectLayer = function(object, layer) {
if (!object.el ||
!object.el.hasAttribute('keep-default-layer')) {
object.layers.set(layer);
}
object.children.forEach(o => setObjectLayer(o, layer));
}
this.el.addEventListener("loaded", () => {
setObjectLayer(this.el.object3D, this.data);
});
if (this.el.hasAttribute('text')) {
this.el.addEventListener("textfontset", () => {
setObjectLayer(this.el.object3D, this.data);
});
}
}
});
/* This component has code in common with viewpoint-selector-renderer
However it's a completely generic stripped-down version, which
just delivers the 2nd camera function.
i.e. it is missing:
- The positioning of the viewpoint-selector entity.
- The cursor / raycaster elements.
*/
AFRAME.registerComponent('secondary-camera', {
schema: {
output: {type: 'string', oneOf: ['screen', 'plane'], default: 'screen'},
outputElement: {type: 'selector'},
cameraType: {type: 'string', oneOf: ['perspective', 'orthographic'], default: 'perspective'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'},
quality: {type: 'string', oneOf: ['high', 'low'], default: 'high'}
},
init() {
if (!this.el.id) {
console.error("No id specified on entity. secondary-camera only works on entities with an id")
}
this.savedViewport = new THREE.Vector4();
this.sceneInfo = this.prepareScene();
this.activeRenderTarget = 0;
// add the render call to the scene
this.el.sceneEl.setAttribute(`add-render-call__${this.el.id}`,
{entity: `#${this.el.id}`,
componentName: "secondary-camera",
sequence: this.data.sequence});
// if there is a cursor on this entity, set it up to read this camera.
if (this.el.hasAttribute('cursor')) {
this.el.setAttribute("cursor", "canvas: user; camera: user");
this.el.addEventListener('loaded', () => {
this.el.components['raycaster'].raycaster.layers.mask = this.el.object3D.layers.mask;
const cursor = this.el.components['cursor'];
cursor.removeEventListeners();
cursor.camera = this.camera;
cursor.canvas = this.data.outputElement;
cursor.canvasBounds = cursor.canvas.getBoundingClientRect();
cursor.addEventListeners();
cursor.updateMouseEventListeners();
});
}
if (this.data.output === 'plane') {
if (!this.data.outputElement.hasLoaded) {
this.data.outputElement.addEventListener("loaded", () => {
this.configureCameraToPlane()
});
} else {
this.configureCameraToPlane()
}
}
},
configureCameraToPlane() {
const object = this.data.outputElement.getObject3D('mesh');
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
// 2 * nearest power of 2 gives a nice look, but at a perf cost.
const factor = (this.data.quality === 'high') ? 2 : 1;
const width = factor * nearestPowerOf2(window.innerWidth * window.devicePixelRatio);
const height = factor * nearestPowerOf2(window.innerHeight * window.devicePixelRatio);
function newRenderTarget() {
const target = new THREE.WebGLRenderTarget(width,
height,
{
minFilter: THREE.LinearFilter,
magFilter: THREE.LinearFilter,
stencilBuffer: false,
generateMipmaps: false
});
return target;
}
// We use 2 render targets, and alternate each frame, so that we are
// never rendering to a target that is actually in front of the camera.
this.renderTargets = [newRenderTarget(),
newRenderTarget()]
this.camera.aspect = object.geometry.parameters.width /
object.geometry.parameters.height;
},
remove() {
this.el.sceneEl.removeAttribute(`add-render-call__${this.el.id}`);
if (this.renderTargets) {
this.renderTargets[0].dispose();
this.renderTargets[1].dispose();
}
// "Remove" code does not tidy up adjustments made to cursor component.
// rarely necessary as cursor is typically put in place at the same time
// as the secondary camera, and so will be disposed of at the same time.
},
prepareScene() {
this.scene = this.el.sceneEl.object3D;
const width = 2;
const height = 2;
if (this.data.cameraType === "orthographic") {
this.camera = new THREE.OrthographicCamera( width / - 2, width / 2, height / 2, height / - 2, 1, 1000 );
}
else {
this.camera = new THREE.PerspectiveCamera( 45, width / height, 1, 1000);
}
this.scene.add(this.camera);
return;
},
render(renderer, renderFunction) {
// don't bother rendering to screen in VR mode.
if (this.data.output === "screen" && this.el.sceneEl.is('vr-mode')) return;
var elemRect;
if (this.data.output === "screen") {
const elem = this.data.outputElement;
// get the viewport relative position of this element
elemRect = elem.getBoundingClientRect();
this.camera.aspect = elemRect.width / elemRect.height;
}
// Camera position & layers match this entity.
this.el.object3D.getWorldPosition(this.camera.position);
this.el.object3D.getWorldQuaternion(this.camera.quaternion);
this.camera.layers.mask = this.el.object3D.layers.mask;
this.camera.updateProjectionMatrix();
if (this.data.output === "screen") {
// "bottom" position is relative to the whole viewport, not just the canvas.
// We need to turn this into a distance from the bottom of the canvas.
// We need to consider the header bar above the canvas, and the size of the canvas.
const mainRect = renderer.domElement.getBoundingClientRect();
renderer.getViewport(this.savedViewport);
renderer.setViewport(elemRect.left - mainRect.left,
mainRect.bottom - elemRect.bottom,
elemRect.width,
elemRect.height);
renderFunction.call(renderer, this.scene, this.camera);
renderer.setViewport(this.savedViewport);
}
else {
// target === "plane"
// store off current renderer properties so that they can be restored.
const currentRenderTarget = renderer.getRenderTarget();
const currentXrEnabled = renderer.xr.enabled;
const currentShadowAutoUpdate = renderer.shadowMap.autoUpdate;
// temporarily override renderer properties for rendering to a texture.
renderer.xr.enabled = false; // Avoid camera modification
renderer.shadowMap.autoUpdate = false; // Avoid re-computing shadows
const renderTarget = this.renderTargets[this.activeRenderTarget];
renderTarget.texture.encoding = renderer.outputEncoding;
renderer.setRenderTarget(renderTarget);
renderer.state.buffers.depth.setMask( true ); // make sure the depth buffer is writable so it can be properly cleared, see #18897
renderer.clear();
renderFunction.call(renderer, this.scene, this.camera);
this.data.outputElement.getObject3D('mesh').material.map = renderTarget.texture;
// restore original renderer settings.
renderer.setRenderTarget(currentRenderTarget);
renderer.xr.enabled = currentXrEnabled;
renderer.shadowMap.autoUpdate = currentShadowAutoUpdate;
this.activeRenderTarget = 1 - this.activeRenderTarget;
}
}
});

AmCharts map - insert html

I'm trying to create a pulse effect on a point on an AmCharts map. In order to do this I need to insert HTML at a latitude/longitude point, but I can't work out how to do it through the API (http://docs.amcharts.com/3/javascriptmaps/).
Here is the effect I'm trying to achieve - http://kevinurrutia.tumblr.com/post/16411271583/creating-a-css3-pulsating-circle
Here is a jsfiddle of the map with the HTML and CSS http://jsfiddle.net/9cBXh/2/
// request #3275
var dataPoints = [{
latitude: '51.000000000000',
longitude: '9.000000000000',
type: 'bubble',
color: '#cc0000',
fixedSize: false,
alpha: 0.9,
height: 30,
width: 30,
centered: true,
id: 'test'
}];
AmCharts.ready(function() {
// create AmMap object
var map = new AmCharts.AmMap();
// set path to images
map.pathToImages = "http://www.ammap.com/lib/images/";
var dataProvider = {
mapVar: AmCharts.maps.worldLow,
getAreasFromMap:false,
images: dataPoints
};
// pass data provider to the map object
map.dataProvider = dataProvider;
map.areasSettings = {
autoZoom: true,
selectedColor: "#CC0000"
};
// write the map to container div
map.write("mapdiv");
});
The red dot is the bubble generated through the API. The blue dot and circle are the HTML I need to insert at the lat/long coordinates... somehow!
Any help would be appreciated.
Here's a complete working example of the AmCharts map with several pulsating HTML elements as map markers:
http://www.amcharts.com/demos/custom-html-elements-map-markers/
(You can view the source by clicking on the EDIT button)
The idea is very simple:
Trap the "positionChanged" event, go through all of the "images" in the map's dataProvider, create an HTML element for each of them, then position them directly over the map by using API functions that resolve longitude/latitude coordinates to screen top/left coordinates:
// add events to recalculate map position when the map is moved or zoomed
map.addListener("positionChanged", updateCustomMarkers);
// this function will take current images on the map and create HTML elements for them
function updateCustomMarkers (event) {
// get map object
var map = event.chart;
// go through all of the images
for( var x in map.dataProvider.images) {
// get MapImage object
var image = map.dataProvider.images[x];
// check if it has corresponding HTML element
if ('undefined' == typeof image.externalElement)
image.externalElement = createCustomMarker(image);
// reposition the element according to its coordinates
image.externalElement.style.top = map.latitudeToY(image.latitude) + 'px';
image.externalElement.style.left = map.longitudeToX(image.longitude) + 'px';
}
}
// this function creates and returns a new marker element
function createCustomMarker(image) {
// create holder
var holder = document.createElement('div');
holder.className = 'map-marker';
holder.title = image.title;
holder.style.position = 'absolute';
// create dot
var dot = document.createElement('div');
dot.className = 'dot';
holder.appendChild(dot);
// create pulse
var pulse = document.createElement('div');
pulse.className = 'pulse';
holder.appendChild(pulse);
// append the marker to the map container
image.chart.chartDiv.appendChild(holder);
return holder;
}
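The markers above still need CSS for the .dot and .pulse elements; the actual styling lives in the linked demo and jsfiddle, but as a placeholder illustration (the sizes, colors and keyframes here are made up, not taken from the demo) it could be injected like this:
// placeholder CSS for the .map-marker / .dot / .pulse elements created above
var markerStyles = document.createElement('style');
markerStyles.innerHTML =
  '.map-marker { position: absolute; width: 0; height: 0; }' +
  '.map-marker .dot { position: absolute; width: 10px; height: 10px; margin: -5px 0 0 -5px;' +
  ' border-radius: 50%; background: #39f; }' +
  '.map-marker .pulse { position: absolute; width: 30px; height: 30px; margin: -15px 0 0 -15px;' +
  ' border: 2px solid #39f; border-radius: 50%; opacity: 0;' +
  ' animation: map-pulse 2s ease-out infinite; }' +
  '@keyframes map-pulse { 0% { transform: scale(0.3); opacity: 0.8; } 100% { transform: scale(1.2); opacity: 0; } }';
document.head.appendChild(markerStyles);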
