I am using react-konva and I want to crop the selected image when the edit button is clicked.
Can anyone guide me on how to achieve this?
This is the Rect I am using to crop a portion of the image.
In this code, the onShapeChange function saves the crop value of the image in the
canvas editor.
{isCropping && (
<>
{React.createElement(`Rect`, {
ref: cropRef,
key: selectedShape.id,
id: selectedShape.id,
...selectedShape.attributes,
draggable: false,
onTransformEnd: (e) => {
const node = cropRef.current;
const scaleX = node.scaleX();
const scaleY = node.scaleY();
node.scaleX(1);
node.scaleY(1);
const newShape = {
  ...selectedShape,
  attributes: {
    ...selectedShape.attributes,
    crop: {
      x: node.x() - selectedShape.attributes.x,
      y: node.y() - selectedShape.attributes.y,
      // width: this.state.rect.attrs.width,
      // height: this.state.rect.attrs.height,
      // x: node.x(),
      // y: node.y(),
      width: Math.max(5, node.width() * scaleX),
      height: Math.max(5, node.height() * scaleY),
    }
  }
};
console.log('newShape in cropper', newShape, 'SelectedShape', selectedShape);
onShapeChange({
id: selectedShape.id,
index: selectedReportItem.index,
reportIndex: selectedReportItem.reportIndex,
newItem: newShape,
})
setIsCropping(false);
}
}, null)}
<Transformer
ref={croptrRef}
rotateEnabled={false}
flipEnabled={false}
boundBoxFunc={(oldBox, newBox) => {
// limit resize
if (newBox.width < 5 || newBox.height < 5) {
return oldBox;
}
return newBox;
}}
/>
</>
)}
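For reference, react-konva's Image node accepts a crop prop, so once onShapeChange has stored the values above, they could be applied to the selected image along these lines (a minimal sketch, not the editor's actual code; note that Konva interprets crop in the source image's own pixel space, so stage-space values may need rescaling first):

// Sketch: applying the saved crop to the selected image via react-konva.
// `shape` and `imageElement` are assumed to come from the editor's state.
import { Image as KonvaImage } from "react-konva";

const CroppedImage = ({ shape, imageElement }) => (
  <KonvaImage
    image={imageElement}
    x={shape.attributes.x}
    y={shape.attributes.y}
    width={shape.attributes.width}
    height={shape.attributes.height}
    // Konva expects crop in the source image's pixel space; if the image
    // is scaled on the stage, rescale the saved stage-space crop first.
    crop={shape.attributes.crop}
  />
);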
I am using the marker from the current example, but with a vertically oriented chart and a few upgrades of my own. My problem is that with the vertical chart, the labels and values of the variables aren't shown, while the same logic works properly with a horizontally oriented chart.
private createCustomMarker(): void {
if (!this.seriesInstances.length) return;
const resultTable: UIElementColumn<UIBackground> = this.chartInstance
.addUIElement(UILayoutBuilders.Column, {
x: this.chartInstance.getDefaultAxisX(),
y: this.chartInstance.getDefaultAxisY()
})
.setMouseInteractions(false)
.setOrigin(UIOrigins.LeftCenter)
.setMargin(5);
const datetimeRow: UITextBox<UIBackground> = resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox);
const rowsY: UITextBox<UIBackground>[] = this.seriesInstances
.map((el: ISeriesInstance, i: number) => {
return resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox)
.setTextFillStyle(this.seriesInstances[i].instance.getStrokeStyle().getFillStyle());
});
const tick: CustomTick = (this.isAppearanceHorizontal ? this.chartInstance.getDefaultAxisX() : this.chartInstance.getDefaultAxisY())
.addCustomTick()
.setAllocatesAxisSpace(false)
.disposeMarker();
// Hide custom cursor components initially.
resultTable.dispose();
tick.dispose();
this.chartInstance.onSeriesBackgroundMouseMove((_: ChartXY<PointMarker, UIBackground>, event: MouseEvent): void => {
const mouseLocationClient: { x: number; y: number } = { x: event.clientX, y: event.clientY };
const mouseLocationEngine: Point = this.chartInstance.engine.clientLocation2Engine(
mouseLocationClient.x,
mouseLocationClient.y
);
// Translate the mouse location to the LCJS coordinate system, then to Axis
// coordinates, for solving data points from the series.
const mouseLocationAxis: Point = translatePoint(
mouseLocationEngine,
this.chartInstance.engine.scale,
this.seriesInstances[0].instance.scale
);
// Solve the nearest data point to the mouse on each series.
const nearestDataPoints: CursorPoint<Series2D>[] = this.seriesInstances.map((el: ISeriesInstance) => {
return el.instance.solveNearestFromScreen(mouseLocationEngine); // on this line most of the elements are undefined, although the data exists and the points are near each other
});
// console.log(nearestDataPoints);
// Find the nearest solved data point to the mouse.
const nearestPoint: CursorPoint<Series2D> = nearestDataPoints.reduce((prev: CursorPoint<Series2D>, curr: CursorPoint<Series2D>) => {
if (!prev) return curr;
if (!curr) return prev;
if (this.isAppearanceHorizontal) {
return Math.abs(mouseLocationAxis.y - curr.location.y) < Math.abs(mouseLocationAxis.y - prev.location.y) ? curr : prev;
} else {
return Math.abs(mouseLocationAxis.x - curr.location.x) < Math.abs(mouseLocationAxis.x - prev.location.x) ? curr : prev
}
});
if (nearestPoint) {
// Set custom cursor location.
resultTable.setPosition({
x: mouseLocationAxis.x,
y: mouseLocationAxis.y,
});
// Change origin of result table based on cursor location.
let resultTableOrigin;
const yScale: number = this.chartInstance.engine.scale.y.getInnerInterval();
const isResultTableOriginXRight: boolean = mouseLocationEngine.x > this.chartInstance.engine.scale.x.getInnerInterval() / 2;
if (mouseLocationEngine.y > yScale - (yScale / 100 * 30)) { // mouseLocationEngine.y > yScale - 30%
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightTop : UIOrigins.LeftTop;
} else if (mouseLocationEngine.y < yScale / 100 * 30) { // mouseLocationEngine.y < 30% of yScale
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightBottom : UIOrigins.LeftBottom;
} else {
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightCenter : UIOrigins.LeftCenter;
}
resultTable.setOrigin(resultTableOrigin);
// Format result table text.
const datetimeValue = this.isAppearanceHorizontal
? this.chartInstance.getDefaultAxisX().formatValue(nearestPoint.location.x)
: this.chartInstance.getDefaultAxisY().formatValue(nearestPoint.location.y)
datetimeRow.setText(`${datetimeValue}`);
rowsY.map((rowY: UITextBox<UIBackground>, i: number) => {
// this.seriesInstances[i].instance.isDisposed() ? rowY.dispose() : rowY.restore(); // after this line the table labels have low font contrast
if (nearestDataPoints[i]?.location) {
const foundSeries = chain(this.track.series)
.flatMap()
.value()[i]
const value: string = this.isAppearanceHorizontal
? this.chartInstance.getDefaultAxisY().formatValue(nearestDataPoints[i].location.y)
: this.chartInstance.getDefaultAxisX().formatValue(nearestDataPoints[i].location.x)
rowY.setText(`${this.seriesInstances[i].instance.getName()}: ${value} ${foundSeries.unit}`) // problem on this line
}
});
tick.setValue(
this.isAppearanceHorizontal
? nearestPoint.location.x
: nearestPoint.location.y
);
resultTable.restore();
tick.restore();
} else {
resultTable.dispose();
tick.dispose();
}
});
this.chartInstance.onSeriesBackgroundMouseLeave(() => {
resultTable.dispose();
tick.dispose();
});
this.chartInstance.onSeriesBackgroundMouseDragStart(() => {
resultTable.dispose();
tick.dispose();
});
}
I want to understand why almost the same code doesn't work on a vertically oriented chart but works fine on a horizontal one.
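As an aside, the nearest-point reduce can be written to discard unsolved series up front, which at least makes the undefined results from solveNearestFromScreen explicit. This is purely an illustrative sketch of the same logic as above, not a fix for the underlying issue:

// Sketch: filter out series that returned no solution before reducing,
// so the reduce only ever sees defined CursorPoints.
const solved = nearestDataPoints.filter((p) => p && p.location);
const nearestPoint = solved.length === 0 ? undefined
  : solved.reduce((prev, curr) => {
      const key: 'x' | 'y' = this.isAppearanceHorizontal ? 'y' : 'x';
      return Math.abs(mouseLocationAxis[key] - curr.location[key]) <
             Math.abs(mouseLocationAxis[key] - prev.location[key]) ? curr : prev;
    });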
I want to make a slideshow in Framer Motion. The Framer Motion docs have an example slideshow like this: https://codesandbox.io/s/framer-motion-image-gallery-pqvx3?from-embed=&file=/src/Example.tsx, but I found a bug: when you drag and then double-click it, it gets stuck, as shown in the picture.
import * as React from "react";
import { useState } from "react";
import { motion, AnimatePresence } from "framer-motion";
import { wrap } from "popmotion";
import { images } from "./image-data";
const variants = {
enter: (direction: number) => {
return {
x: direction > 0 ? 1000 : -1000,
opacity: 0
};
},
center: {
zIndex: 1,
x: 0,
opacity: 1
},
exit: (direction: number) => {
return {
zIndex: 0,
x: direction < 0 ? 1000 : -1000,
opacity: 0
};
}
};
const swipeConfidenceThreshold = 10000;
const swipePower = (offset: number, velocity: number) => {
return Math.abs(offset) * velocity;
};
export const Example = () => {
const [[page, direction], setPage] = useState([0, 0]);
const imageIndex = wrap(0, images.length, page);
const paginate = (newDirection: number) => {
setPage([page + newDirection, newDirection]);
};
return (
<>
<AnimatePresence initial={false} custom={direction}>
<motion.img
key={page}
src={images[imageIndex]}
custom={direction}
variants={variants}
initial="enter"
animate="center"
exit="exit"
transition={{
x: { type: "spring", stiffness: 300, damping: 30 },
opacity: { duration: 0.2 }
}}
drag="x"
dragConstraints={{ left: 0, right: 0 }}
dragElastic={1}
onDragEnd={(e, { offset, velocity }) => {
const swipe = swipePower(offset.x, velocity.x);
if (swipe < -swipeConfidenceThreshold) {
paginate(1);
} else if (swipe > swipeConfidenceThreshold) {
paginate(-1);
}
}}
/>
</AnimatePresence>
</>
);
};
I've tried to solve this problem but still can't fix it. Can someone help me?
This looks like a bug in framer-motion.
Up until v1.6.2, everything works fine. The bug seems to occur in all later versions.
There is also an interesting changelog:
[1.6.3] 2019-08-19
Fixed
Ensuring onDragEnd always fires after if onDragStart fired.
Here is a link to the related issue on GitHub, opened by the author of this question.
Until that bug is fixed, here is a workaround that uses Pan events:
// Imports added for completeness (the workaround relies on these APIs):
import * as React from "react";
import { useState, useRef, useEffect } from "react";
import { motion, useMotionValue, animate } from "framer-motion";

export default function Carousel() {
const animationConfidenceThreshold = 200; // you have to move the element 200px in order to perform an animation
const [displayed, setDisplayed] = useState(0); // the index of the displayed element
const xOffset = useMotionValue(0); // this is the motion value that drives the translation
const lastOffset = useRef(0); // this is the lastValue of the xOffset after the Pan ended
const elementAnimatingIn = useRef(false); // this will be set to true whilst a new element is performing its animation to the center
useEffect(() => {
// this happens after we have dragged the element out and triggered a rerender
if (elementAnimatingIn.current) {
const rightPan = xOffset.get() > 0; // check if the user drags it to the right
// if the element has animated out to the right it animates in from the left
xOffset.set(
rightPan ? -1 * window.innerWidth - 200 : window.innerWidth + 200
);
// perform the animation to the center
animate(xOffset, 0, {
duration: 0.5,
onComplete: () => {
xOffset.stop();
},
onStop: () => {
elementAnimatingIn.current = false;
lastOffset.current = xOffset.get();
}
});
}
});
return (
<div className="container">
<motion.div
className="carouselElement"
onPan={(e, info) => {
xOffset.set(lastOffset.current + info.offset.x); // set the xOffset to the current offset of the pan + the prev offset
}}
style={{ x: xOffset }}
onPanStart={() => {
// check if xOffset is animating, if true stop animation and set lastOffset to current xOffset
if (xOffset.isAnimating()) {
xOffset.stop();
lastOffset.current = xOffset.get();
}
}}
onPanEnd={(e, info) => {
// there can be a difference between the info.offset.x in onPan and onPanEnd
// so we will set the xOffset to the info.offset.x when the pan ends
xOffset.set(lastOffset.current + info.offset.x);
lastOffset.current = xOffset.get(); // set the lastOffset to the current xOffset
if (Math.abs(lastOffset.current) < animationConfidenceThreshold) {
// if its only a small movement, animate back to the initial position
animate(xOffset, 0, {
onComplete: () => {
lastOffset.current = 0;
}
});
} else {
// perform the animation to the next element
const rightPan = xOffset.get() > 0; // check if the user drags it to the right
animate(
xOffset,
rightPan ? window.innerWidth + 200 : -1 * window.innerWidth - 200, // animate out of view
{
duration: 0.5,
onComplete: () => {
// after the element has animated out
// stop animation (it does not do this on its own, only one animation can happen at a time)
xOffset.stop();
elementAnimatingIn.current = true;
// trigger a rerender with the new content - now the useEffect runs
setDisplayed(rightPan ? displayed - 1 : displayed + 1);
}
}
);
}
}}
>
<span style={{ userSelect: "none" }}>
{"I am element #" + displayed}
</span>
</motion.div>
</div>
);
}
Check this codesandbox out!
I want to use the A-Frame raycaster component to catch intersections with objects. I'm adding custom objects to a GLTF model. I call them "collision shapes", and they're used to detect collisions between GLTF models and projectiles. Use case: shooting a bullet at an enemy.
The problem is that it works for some models, but for others it catches intersections outside the collision shape.
To position a collision shape, I use the name of the bone the collision object should be anchored to.
My code is the following (I removed some parts to make it shorter):
<a-gltf-model src="#bird"
position="2 -75 -300"
animation-mixer
scale="1 1 1"
shape__Bone_38_08="bone: Bone_38_08; shape: box; halfExtents: 10 10 5"
shape__Bone_39_07="bone: Bone_39_07; shape: box; halfExtents: 15 10 10">
</a-gltf-model>
<a-gltf-model src="#orc" position="-2 0 -5" animation-mixer="clip: Orc.004" scale="2 2 2" rotation="0 180 0"
shape__hair_1="bone: hair_1; shape: box; halfExtents: 0.05 0.075 0.05"
shape__leg_L_1="bone: leg_L_1; shape: box; halfExtents: 0.05 0.125 0.05; offset: 0 -0.05 -0.1">
</a-gltf-model>
<a-entity camera look-controls position="0 1.6 0" wasd-controls>
<a-cursor color="gray" raycaster="objects: [data-raycastable]" ></a-cursor>
</a-entity>
The components:
AFRAME.registerComponent("shape", {
schema: {
bone: { default: "" },
shape: { default: "box", oneOf: ["box", "sphere", "cylinder"] },
offset: { type: "vec3", default: { x: 0, y: 0, z: 0 } },
orientation: { type: "vec4", default: { x: 0, y: 0, z: 0, w: 1 } },
// box
halfExtents: { type: "vec3", default: { x: 0.5, y: 0.5, z: 0.5 }, if: { shape: ["box"] } },
visible: { type: "boolean", default: true }
},
multiple: true,
init(){
const data = this.data;
const self = this;
const el = this.el;
el.addEventListener("model-loaded", function modelReady() {
el.removeEventListener("model-loaded", modelReady);
const boneDummy = document.createElement("a-entity");
self.setDummyShape(boneDummy, data);
self.boneObj = self.getBone(el.object3D, data.bone);
el.appendChild(boneDummy);
self.boneDummy = boneDummy;
});
},
setDummyShape(dummy, data) {
const shapeName = "collidable-shape";
const config = {
shapeName: data.bone,
shape: data.shape,
offset: data.offset,
halfExtents: data.halfExtents
};
dummy.setAttribute(shapeName, config);
},
getBone(root, boneName) {
let bone = root.getObjectByName(boneName);
if (!bone) {
root.traverse(node => {
const n = node;
if (n?.isBone && n.name.includes(boneName)) {
bone = n;
}
});
}
return bone;
},
inverseWorldMatrix: new THREE.Matrix4(),
boneMatrix: new THREE.Matrix4(),
tick() {
const el = this.el;
if (!el) { throw Error("AFRAME entity is undefined."); }
if (!this.boneObj || !this.boneDummy) return;
this.inverseWorldMatrix.copy(el.object3D.matrix).invert();
this.boneMatrix.multiplyMatrices(this.inverseWorldMatrix, this.boneObj.matrixWorld);
this.boneDummy.object3D.position.setFromMatrixPosition(this.boneMatrix);
}
})
AFRAME.registerComponent("collidable-shape", {
schema: {
shape: { default: "box", oneOf: ["box", "sphere", "cylinder"] },
offset: { type: "vec3", default: { x: 0, y: 0, z: 0 } },
orientation: { type: "vec4", default: { x: 0, y: 0, z: 0, w: 1 } },
// box
halfExtents: { type: "vec3", default: { x: 0.5, y: 0.5, z: 0.5 }, if: { shape: ["box"] } },
visible: { type: "boolean", default: true }
},
collisionObject: null,
multiple:true,
init() {
const scene = this.el.sceneEl;
if (!scene) { throw Error("AFRAME scene is undefined."); }
if (scene.hasLoaded) {
this.initShape();
} else {
scene.addEventListener("loaded", this.initShape.bind(this));
}
},
initShape() {
const data = this.data;
this.el.setAttribute("data-raycastable", "");
this.el.addEventListener('mouseenter', evt => {
console.log("mouse enter", data.shape);
this.el.object3D.children[0].material.color.setHex(0x00ff00);
});
this.el.addEventListener('mouseleave', evt => {
console.log("mouse leave", data.shape);
this.el.object3D.children[0].material.color.setHex(0xff0000);
});
const scale = new THREE.Vector3(1, 1, 1);
this.el.object3D.getWorldScale(scale);
let shape;
let offset;
let orientation;
if (Object.prototype.hasOwnProperty.call(data, "offset")) {
offset = new THREE.Vector3(
data.offset.x * scale.x,
data.offset.y * scale.y,
data.offset.z * scale.z
);
}
if (Object.prototype.hasOwnProperty.call(data, "orientation")) {
orientation = new THREE.Quaternion();
orientation.copy(data.orientation);
}
switch (data.shape) {
case "box":
shape = new THREE.BoxGeometry(
data.halfExtents.x * 2 * scale.x,
data.halfExtents.y * 2 * scale.y,
data.halfExtents.z * 2 * scale.z
);
break;
}
this._applyShape(shape, offset, data.visible);
},
_applyShape(shape, offset, visible) {
const material = new THREE.MeshBasicMaterial({ color: 0xff0000, transparent: true, opacity: 0.3 });
const wireframe = new THREE.LineSegments(
new THREE.EdgesGeometry(shape),
new THREE.LineBasicMaterial({ color: 0xff0000, linewidth: 3 }));
this.collisionObject = new THREE.Mesh(shape, material);
this.collisionObject.add(wireframe);
if (offset) {
this.collisionObject.position.set(offset.x, offset.y, offset.z);
}
this.collisionObject.visible = visible === true;
this.el.setObject3D("mesh", this.collisionObject);
const size = new THREE.Vector3();
const box = new THREE.Box3().setFromObject(this.el.object3D);
box.getSize(size);
const bbox = new THREE.BoxGeometry(size.x, size.y, size.z);
const bboxWireframe = new THREE.LineSegments(
new THREE.EdgesGeometry(bbox),
new THREE.LineBasicMaterial({ color: 0x000000, linewidth: 10 }));
this.el.object3D.add(bboxWireframe)
}
});
The sample project can be found here: https://glitch.com/edit/#!/collisons-test
Please note that it works as expected for the bird, but behaves strangely for the orc. Also, the bounding box doesn't match the collision-shape box itself, which is also something that isn't clear to me.
Also the bounding box doesn't match the collision-shape box itself.
The bounding box takes the world matrix into account. You can see how it changes when the model scale is different.
You can also see that the red boxes aren't scaling nicely. I think most of the problems here are the result of scale mix-ups.
The problem is that for some models it works, but for some of them it catches intersections outside the collision shape.
Adding the wireframes before setting the object3D interferes with the raycaster. I'm not sure, but I'd guess this comes down to scaling issues as well.
Here's a glitch with the wireframes set after setObject3D.
I'd start with a different approach: create the boxes as scene children and manage their transforms from the model's world matrix plus the bone offsets. That will be much easier to manage (scale up/down, reposition) and debug; a sketch of that approach follows.
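A minimal sketch of that scene-child approach, assuming the model entity is given an id (the component name, schema, and sync logic here are illustrative; the bone's scale is deliberately ignored):

// Sketch: a collision box that lives as a direct scene child and follows a
// bone's world transform each frame, so model scaling can't distort it.
AFRAME.registerComponent("bone-collider", {
  schema: {
    target: { type: "selector" },  // the <a-gltf-model> entity
    bone: { default: "" },         // name of the bone to follow
    halfExtents: { type: "vec3", default: { x: 0.5, y: 0.5, z: 0.5 } }
  },
  init() {
    const he = this.data.halfExtents;
    const mesh = new THREE.Mesh(
      new THREE.BoxGeometry(he.x * 2, he.y * 2, he.z * 2),
      new THREE.MeshBasicMaterial({ color: 0xff0000, transparent: true, opacity: 0.3 })
    );
    this.el.setObject3D("mesh", mesh);
    this.el.setAttribute("data-raycastable", "");
    this.ignoredScale = new THREE.Vector3();
    this.data.target.addEventListener("model-loaded", () => {
      this.bone = this.data.target.object3D.getObjectByName(this.data.bone);
    });
  },
  tick() {
    if (!this.bone) return;
    // Copy the bone's world position/rotation onto this scene-level entity;
    // the bone's scale is intentionally discarded.
    this.bone.matrixWorld.decompose(
      this.el.object3D.position,
      this.el.object3D.quaternion,
      this.ignoredScale
    );
  }
});

Usage would then be something like <a-entity bone-collider="target: #orc; bone: hair_1; halfExtents: 0.05 0.075 0.05"></a-entity> placed directly under <a-scene>, with id="orc" added to the model entity.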
I have a map with location markers and I want to change the size of the markers when the user zooms in/out. I'm capturing the zoom level in state, and it's a floating-point number (e.g. 11.3235).
How can I pass this number (from state) to a CSS class so that my markers dynamically change size? Instead of having a small/medium/large marker, I want the marker's width and height to track the zoom level precisely.
class Mapbox extends Component {
constructor(props){
super(props)
this.state = {
viewport: {
width: 900,
height: 500,
latitude: 49.2463,
longitude: -123.1162,
zoom: 11
},
active_vehicles: [],
marker_size: 0,
};
}
_onViewportChange = viewport => {
  // Depending on zoom level, update the markers' size dynamically.
  // Read zoom from the incoming viewport; this.state would still hold the old value here.
  this.setState({ viewport, marker_size: viewport.zoom });
};
createMarkers = () => {
let markers = []
if(this.state.active_vehicles){
for (let i = 0; i < this.state.active_vehicles.length; i++) {
markers.push(
<Marker key={i} latitude={this.state.active_vehicles[i]["Latitude"]} longitude={this.state.active_vehicles[i]["Longitude"]}>
<div className={"location-marker " + this.state.marker_size}></div>
</Marker>
)
}
return markers
}
}
render() {
return (
<ReactMapGL
// mapbox API access token
mapboxApiAccessToken={MAPBOX_TOKEN}
mapStyle="mapbox://styles/mapbox/dark-v9"
{...this.state.viewport}
onViewportChange={this._onViewportChange}>
<div>
{this.createMarkers()}
</div>
</ReactMapGL>
);
}
}
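One way to get a size that tracks the zoom exactly is to skip CSS classes entirely and compute an inline style from the zoom value. A minimal sketch against the state shape above (the multiplier of 4 is an arbitrary assumption; tune to taste):

// Sketch: derive the marker size from the zoom level and apply it inline
// rather than switching between CSS classes.
createMarkers = () => {
  const size = this.state.viewport.zoom * 4; // e.g. zoom 11.3235 -> ~45px
  return (this.state.active_vehicles || []).map((vehicle, i) => (
    <Marker key={i} latitude={vehicle["Latitude"]} longitude={vehicle["Longitude"]}>
      <div
        className="location-marker"
        style={{ width: `${size}px`, height: `${size}px` }}
      />
    </Marker>
  ));
};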
The recent v0.3.0 blog post mentions that WebVR 1.0 support allows "us to have different content on the desktop display than the headset, opening the door for asynchronous gameplay and spectator modes." This is precisely what I'm trying to get working. I'm looking to have one camera in the scene represent the viewpoint of the HMD and a secondary camera represent a spectator of the same scene, rendering that view to a canvas on the same webpage. 0.3.0 removed the ability to render a-scene to a specific canvas in favor of the embedded component. Any thoughts on how to accomplish two cameras rendering a single scene simultaneously?
My intention is to have the desktop display show what a user is doing from a different perspective. My end goal is to build a mixed-reality green-screen component.
While there may be a better or cleaner way to do this in the future, I was able to get a second camera rendering by looking at examples of how this is done in the THREE.js world.
I added a component called spectator to a non-active camera. In the init function, I set up a new renderer and attach it to a div outside the scene to create a new canvas. I then call its render method inside the tick() part of the component lifecycle.
I have not worked out how to isolate the movement of this camera yet; the default look-controls of the 0.3.0 A-Frame scene still control both cameras.
Source code:
https://gist.github.com/derickson/334a48eb1f53f6891c59a2c137c180fa
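The gist linked above boils down to roughly the following (a simplified sketch, not the exact gist code; the #spectatorDiv container and canvas size are assumptions):

// Simplified sketch: a second THREE.WebGLRenderer draws the same scene from
// this (non-active) camera into its own canvas on every tick.
AFRAME.registerComponent("spectator", {
  init: function () {
    this.renderer = new THREE.WebGLRenderer({ antialias: true });
    this.renderer.setSize(640, 360); // assumed canvas size
    document.querySelector("#spectatorDiv") // assumed container div
      .appendChild(this.renderer.domElement);
  },
  tick: function () {
    const camera = this.el.getObject3D("camera");
    if (!camera) return;
    this.renderer.render(this.el.sceneEl.object3D, camera);
  }
});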
I've created a set of components that can help with this. https://github.com/diarmidmackenzie/aframe-multi-camera
Here's an example showing usage with A-Frame 1.2.0 to display the main camera on the left half of the screen, and a secondary camera on the right half.
<!DOCTYPE html>
<html>
<head>
<script src="https://aframe.io/releases/1.2.0/aframe.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/diarmidmackenzie/aframe-multi-camera#latest/src/multi-camera.min.js"></script>
</head>
<body>
<div>
<a-scene>
<a-entity camera look-controls wasd-controls position="0 1.6 0">
<!-- first secondary camera is a child of the main camera, so that it always has the same position / rotation -->
<!-- replace main camera (since main camera is rendered across the whole screen, which we don't want) -->
<a-entity
id="camera1"
secondary-camera="outputElement:#viewport1;sequence: replace"
>
</a-entity>
</a-entity>
<!-- PUT YOUR SCENE CONTENT HERE-->
<!-- position of 2nd secondary camera-->
<a-entity
id="camera2"
secondary-camera="outputElement:#viewport2"
position="8 1.6 -6"
rotation="0 90 0"
>
</a-entity>
</a-scene>
</div>
<!-- standard HTML to control the layout of the two viewports -->
<div style="width: 100%; height:100%; display: flex">
<div id="viewport1" style="width: 50%; height:100%"></div>
<div id="viewport2" style="width: 50%; height:100%"></div>
</div>
</body>
</html>
Also here as a glitch: https://glitch.com/edit/#!/recondite-polar-hyssop
It's also been suggested that I post the entire source code for the multi-camera component here.
Here it is...
/* System that supports capture of the main A-Frame render() call
by add-render-call */
AFRAME.registerSystem('add-render-call', {
init() {
this.render = this.render.bind(this);
this.originalRender = this.el.sceneEl.renderer.render;
this.el.sceneEl.renderer.render = this.render;
this.el.sceneEl.renderer.autoClear = false;
this.preRenderCalls = [];
this.postRenderCalls = [];
this.suppressDefaultRenderCount = 0;
},
addPreRenderCall(render) {
this.preRenderCalls.push(render)
},
removePreRenderCall(render) {
const index = this.preRenderCalls.indexOf(render);
if (index > -1) {
this.preRenderCalls.splice(index, 1);
}
},
addPostRenderCall(render) {
this.postRenderCalls.push(render)
},
removePostRenderCall(render) {
const index = this.postRenderCalls.indexOf(render);
if (index > -1) {
this.postRenderCalls.splice(index, 1);
}
else {
console.warn("Unexpected failure to remove render call")
}
},
suppressOriginalRender() {
this.suppressDefaultRenderCount++;
},
unsuppressOriginalRender() {
this.suppressDefaultRenderCount--;
if (this.suppressDefaultRenderCount < 0) {
console.warn("Unexpected unsuppression of original render")
this.suppressDefaultRenderCount = 0;
}
},
render(scene, camera) {
const renderer = this.el.sceneEl.renderer;
// set up THREE.js stats to correctly count across all render calls.
renderer.info.autoReset = false;
renderer.info.reset();
this.preRenderCalls.forEach((f) => f());
if (this.suppressDefaultRenderCount <= 0) {
this.originalRender.call(renderer, scene, camera)
}
this.postRenderCalls.forEach((f) => f());
}
});
/* Component that captures the main A-Frame render() call
and adds an additional render call.
Must specify an entity and component that expose a function call render(). */
AFRAME.registerComponent('add-render-call', {
multiple: true,
schema: {
entity: {type: 'selector'},
componentName: {type: 'string'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'}
},
init() {
this.invokeRender = this.invokeRender.bind(this);
},
update(oldData) {
// first clean up any old settings.
this.removeSettings(oldData)
// now add new settings.
if (this.data.sequence === "before") {
this.system.addPreRenderCall(this.invokeRender)
}
if (this.data.sequence === "replace") {
this.system.suppressOriginalRender()
}
if (this.data.sequence === "after" ||
this.data.sequence === "replace")
{
this.system.addPostRenderCall(this.invokeRender)
}
},
remove() {
this.removeSettings(this.data)
},
removeSettings(data) {
if (data.sequence === "before") {
this.system.removePreRenderCall(this.invokeRender)
}
if (data.sequence === "replace") {
this.system.unsuppressOriginalRender()
}
if (data.sequence === "after" ||
data.sequence === "replace")
{
this.system.removePostRenderCall(this.invokeRender)
}
},
invokeRender()
{
const componentName = this.data.componentName;
if ((this.data.entity) &&
(this.data.entity.components[componentName])) {
this.data.entity.components[componentName].render(this.el.sceneEl.renderer, this.system.originalRender);
}
}
});
/* Component to set layers via HTML attribute. */
AFRAME.registerComponent('layers', {
schema : {type: 'number', default: 0},
init: function() {
const setObjectLayer = function(object, layer) {
if (!object.el ||
!object.el.hasAttribute('keep-default-layer')) {
object.layers.set(layer);
}
object.children.forEach(o => setObjectLayer(o, layer));
}
this.el.addEventListener("loaded", () => {
setObjectLayer(this.el.object3D, this.data);
});
if (this.el.hasAttribute('text')) {
this.el.addEventListener("textfontset", () => {
setObjectLayer(this.el.object3D, this.data);
});
}
}
});
/* This component has code in common with viewpoint-selector-renderer
However it's a completely generic stripped-down version, which
just delivers the 2nd camera function.
i.e. it is missing:
- The positioning of the viewpoint-selector entity.
- The cursor / raycaster elements.
*/
AFRAME.registerComponent('secondary-camera', {
schema: {
output: {type: 'string', oneOf: ['screen', 'plane'], default: 'screen'},
outputElement: {type: 'selector'},
cameraType: {type: 'string', oneOf: ['perspective', 'orthographic'], default: 'perspective'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'},
quality: {type: 'string', oneOf: ['high', 'low'], default: 'high'}
},
init() {
if (!this.el.id) {
console.error("No id specified on entity. secondary-camera only works on entities with an id")
}
this.savedViewport = new THREE.Vector4();
this.sceneInfo = this.prepareScene();
this.activeRenderTarget = 0;
// add the render call to the scene
this.el.sceneEl.setAttribute(`add-render-call__${this.el.id}`,
{entity: `#${this.el.id}`,
componentName: "secondary-camera",
sequence: this.data.sequence});
// if there is a cursor on this entity, set it up to read this camera.
if (this.el.hasAttribute('cursor')) {
this.el.setAttribute("cursor", "canvas: user; camera: user");
this.el.addEventListener('loaded', () => {
this.el.components['raycaster'].raycaster.layers.mask = this.el.object3D.layers.mask;
const cursor = this.el.components['cursor'];
cursor.removeEventListeners();
cursor.camera = this.camera;
cursor.canvas = this.data.outputElement;
cursor.canvasBounds = cursor.canvas.getBoundingClientRect();
cursor.addEventListeners();
cursor.updateMouseEventListeners();
});
}
if (this.data.output === 'plane') {
if (!this.data.outputElement.hasLoaded) {
this.data.outputElement.addEventListener("loaded", () => {
this.configureCameraToPlane()
});
} else {
this.configureCameraToPlane()
}
}
},
configureCameraToPlane() {
const object = this.data.outputElement.getObject3D('mesh');
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
// 2 * nearest power of 2 gives a nice look, but at a perf cost.
const factor = (this.data.quality === 'high') ? 2 : 1;
const width = factor * nearestPowerOf2(window.innerWidth * window.devicePixelRatio);
const height = factor * nearestPowerOf2(window.innerHeight * window.devicePixelRatio);
function newRenderTarget() {
const target = new THREE.WebGLRenderTarget(width,
height,
{
minFilter: THREE.LinearFilter,
magFilter: THREE.LinearFilter,
stencilBuffer: false,
generateMipmaps: false
});
return target;
}
// We use 2 render targets, and alternate each frame, so that we are
// never rendering to a target that is actually in front of the camera.
this.renderTargets = [newRenderTarget(),
newRenderTarget()]
this.camera.aspect = object.geometry.parameters.width /
object.geometry.parameters.height;
},
remove() {
this.el.sceneEl.removeAttribute(`add-render-call__${this.el.id}`);
if (this.renderTargets) {
this.renderTargets[0].dispose();
this.renderTargets[1].dispose();
}
// "Remove" code does not tidy up adjustments made to cursor component.
// rarely necessary as cursor is typically put in place at the same time
// as the secondary camera, and so will be disposed of at the same time.
},
prepareScene() {
this.scene = this.el.sceneEl.object3D;
const width = 2;
const height = 2;
if (this.data.cameraType === "orthographic") {
this.camera = new THREE.OrthographicCamera( width / - 2, width / 2, height / 2, height / - 2, 1, 1000 );
}
else {
this.camera = new THREE.PerspectiveCamera( 45, width / height, 1, 1000);
}
this.scene.add(this.camera);
return;
},
render(renderer, renderFunction) {
// don't bother rendering to screen in VR mode.
if (this.data.output === "screen" && this.el.sceneEl.is('vr-mode')) return;
var elemRect;
if (this.data.output === "screen") {
const elem = this.data.outputElement;
// get the viewport relative position of this element
elemRect = elem.getBoundingClientRect();
this.camera.aspect = elemRect.width / elemRect.height;
}
// Camera position & layers match this entity.
this.el.object3D.getWorldPosition(this.camera.position);
this.el.object3D.getWorldQuaternion(this.camera.quaternion);
this.camera.layers.mask = this.el.object3D.layers.mask;
this.camera.updateProjectionMatrix();
if (this.data.output === "screen") {
// "bottom" position is relative to the whole viewport, not just the canvas.
// We need to turn this into a distance from the bottom of the canvas.
// We need to consider the header bar above the canvas, and the size of the canvas.
const mainRect = renderer.domElement.getBoundingClientRect();
renderer.getViewport(this.savedViewport);
renderer.setViewport(elemRect.left - mainRect.left,
mainRect.bottom - elemRect.bottom,
elemRect.width,
elemRect.height);
renderFunction.call(renderer, this.scene, this.camera);
renderer.setViewport(this.savedViewport);
}
else {
// target === "plane"
// store off current renderer properties so that they can be restored.
const currentRenderTarget = renderer.getRenderTarget();
const currentXrEnabled = renderer.xr.enabled;
const currentShadowAutoUpdate = renderer.shadowMap.autoUpdate;
// temporarily override renderer properties for rendering to a texture.
renderer.xr.enabled = false; // Avoid camera modification
renderer.shadowMap.autoUpdate = false; // Avoid re-computing shadows
const renderTarget = this.renderTargets[this.activeRenderTarget];
renderTarget.texture.encoding = renderer.outputEncoding;
renderer.setRenderTarget(renderTarget);
renderer.state.buffers.depth.setMask( true ); // make sure the depth buffer is writable so it can be properly cleared, see #18897
renderer.clear();
renderFunction.call(renderer, this.scene, this.camera);
this.data.outputElement.getObject3D('mesh').material.map = renderTarget.texture;
// restore original renderer settings.
renderer.setRenderTarget(currentRenderTarget);
renderer.xr.enabled = currentXrEnabled;
renderer.shadowMap.autoUpdate = currentShadowAutoUpdate;
this.activeRenderTarget = 1 - this.activeRenderTarget;
}
}
});
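For completeness, the output: plane mode in the schema above can target an in-scene surface instead of a screen region; a usage sketch (entity ids and positions are illustrative):

<!-- Sketch: render the secondary camera onto an in-scene plane -->
<a-plane id="monitor" position="0 2 -4" width="4" height="2.25"></a-plane>
<a-entity
  id="camera3"
  secondary-camera="output: plane; outputElement: #monitor"
  position="0 1.6 5"
></a-entity>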