slideshow stuck framer motion - framer-motion

I want to make a slideshow with Framer Motion. The Framer Motion docs include an example slideshow here: https://codesandbox.io/s/framer-motion-image-gallery-pqvx3?from-embed=&file=/src/Example.tsx, but I found a bug: when I drag and then double-click the image, it gets stuck, as shown in this picture.
import * as React from "react";
import { useState } from "react";
import { motion, AnimatePresence } from "framer-motion";
import { wrap } from "popmotion";
import { images } from "./image-data";
const variants = {
enter: (direction: number) => {
return {
x: direction > 0 ? 1000 : -1000,
opacity: 0
};
},
center: {
zIndex: 1,
x: 0,
opacity: 1
},
exit: (direction: number) => {
return {
zIndex: 0,
x: direction < 0 ? 1000 : -1000,
opacity: 0
};
}
};
const swipeConfidenceThreshold = 10000;
const swipePower = (offset: number, velocity: number) => {
return Math.abs(offset) * velocity;
};
export const Example = () => {
const [[page, direction], setPage] = useState([0, 0]);images.
const imageIndex = wrap(0, images.length, page);
const paginate = (newDirection: number) => {
setPage([page + newDirection, newDirection]);
};
return (
<>
<AnimatePresence initial={false} custom={direction}>
<motion.img
key={page}
src={images[imageIndex]}
custom={direction}
variants={variants}
initial="enter"
animate="center"
exit="exit"
transition={{
x: { type: "spring", stiffness: 300, damping: 30 },
opacity: { duration: 0.2 }
}}
drag="x"
dragConstraints={{ left: 0, right: 0 }}
dragElastic={1}
onDragEnd={(e, { offset, velocity }) => {
const swipe = swipePower(offset.x, velocity.x);
if (swipe < -swipeConfidenceThreshold) {
paginate(1);
} else if (swipe > swipeConfidenceThreshold) {
paginate(-1);
}
}}
/>
</AnimatePresence>
</>
);
};
I've tried to solve this problem but still can't fix it. Can someone help me?

This looks like a bug in framer-motion.
Up until v1.6.2, everything works fine. The bug seems to occur in all later versions.
There is also an interesting changelog:
[1.6.3] 2019-08-19
Fixed
Ensuring onDragEnd always fires after if onDragStart fired.
Here is a link to the related issue on GitHub, opened by the author of this question.
Until that bug is fixed, here is a workaround that uses Pan events:
import * as React from "react";
import { useState, useRef, useEffect } from "react";
import { motion, useMotionValue, animate } from "framer-motion";

export default function Carousel() {
  const animationConfidenceThreshold = 200; // you have to move the element 200px in order to perform an animation
  const [displayed, setDisplayed] = useState(0); // the index of the displayed element
  const xOffset = useMotionValue(0); // this is the motion value that drives the translation
  const lastOffset = useRef(0); // this is the last value of the xOffset after the Pan ended
  const elementAnimatingIn = useRef(false); // this will be set to true whilst a new element is performing its animation to the center

  useEffect(() => {
    // this happens after we have dragged the element out and triggered a rerender
    if (elementAnimatingIn.current) {
      const rightPan = xOffset.get() > 0; // check if the user drags it to the right
      // if the element has animated out to the right it animates in from the left
      xOffset.set(
        rightPan ? -1 * window.innerWidth - 200 : window.innerWidth + 200
      );
      // perform the animation to the center
      animate(xOffset, 0, {
        duration: 0.5,
        onComplete: () => {
          xOffset.stop();
        },
        onStop: () => {
          elementAnimatingIn.current = false;
          lastOffset.current = xOffset.get();
        }
      });
    }
  });

  return (
    <div className="container">
      <motion.div
        className="carouselElement"
        onPan={(e, info) => {
          xOffset.set(lastOffset.current + info.offset.x); // set the xOffset to the current offset of the pan + the prev offset
        }}
        style={{ x: xOffset }}
        onPanStart={() => {
          // check if xOffset is animating, if true stop animation and set lastOffset to current xOffset
          if (xOffset.isAnimating()) {
            xOffset.stop();
            lastOffset.current = xOffset.get();
          }
        }}
        onPanEnd={(e, info) => {
          // there can be a difference between the info.offset.x in onPan and onPanEnd
          // so we will set the xOffset to the info.offset.x when the pan ends
          xOffset.set(lastOffset.current + info.offset.x);
          lastOffset.current = xOffset.get(); // set the lastOffset to the current xOffset
          if (Math.abs(lastOffset.current) < animationConfidenceThreshold) {
            // if it's only a small movement, animate back to the initial position
            animate(xOffset, 0, {
              onComplete: () => {
                lastOffset.current = 0;
              }
            });
          } else {
            // perform the animation to the next element
            const rightPan = xOffset.get() > 0; // check if the user drags it to the right
            animate(
              xOffset,
              rightPan ? window.innerWidth + 200 : -1 * window.innerWidth - 200, // animate out of view
              {
                duration: 0.5,
                onComplete: () => {
                  // after the element has animated out,
                  // stop the animation (it does not do this on its own; only one animation can happen at a time)
                  xOffset.stop();
                  elementAnimatingIn.current = true;
                  // trigger a rerender with the new content - now the useEffect runs
                  setDisplayed(rightPan ? displayed - 1 : displayed + 1);
                }
              }
            );
          }
        }}
      >
        <span style={{ userSelect: "none" }}>
          {"I am element #" + displayed}
        </span>
      </motion.div>
    </div>
  );
}
Check this codesandbox out!

Related

The custom marker of the lightningchart doesn't work properly

I am using the marker from the current example, but with a vertically oriented chart and a few modifications of my own. My problem is that with the vertical chart, the labels and values of the variables are not shown, while the same logic works properly with a horizontally oriented chart.
private createCustomMarker(): void {
if (!this.seriesInstances.length) return;
const resultTable: UIElementColumn<UIBackground> = this.chartInstance
.addUIElement(UILayoutBuilders.Column, {
x: this.chartInstance.getDefaultAxisX(),
y: this.chartInstance.getDefaultAxisY()
})
.setMouseInteractions(false)
.setOrigin(UIOrigins.LeftCenter)
.setMargin(5);
const datetimeRow: UITextBox<UIBackground> = resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox);
const rowsY: UITextBox<UIBackground>[] = this.seriesInstances
.map((el: ISeriesInstance, i: number) => {
return resultTable
.addElement(UILayoutBuilders.Row)
.addElement(UIElementBuilders.TextBox)
.setTextFillStyle(this.seriesInstances[i].instance.getStrokeStyle().getFillStyle());
});
const tick: CustomTick = (this.isAppearanceHorizontal ? this.chartInstance.getDefaultAxisX() : this.chartInstance.getDefaultAxisY())
.addCustomTick()
.setAllocatesAxisSpace(false)
.disposeMarker();
// Hide custom cursor components initially.
resultTable.dispose();
tick.dispose();
this.chartInstance.onSeriesBackgroundMouseMove((_: ChartXY<PointMarker, UIBackground>, event: MouseEvent): void => {
const mouseLocationClient: { x: number; y: number } = { x: event.clientX, y: event.clientY };
const mouseLocationEngine: Point = this.chartInstance.engine.clientLocation2Engine(
mouseLocationClient.x,
mouseLocationClient.y
);
// Translate mouse location to LCJS coordinate system for solving data points from series, and translating to Axes.
// Translate mouse location to Axis.
const mouseLocationAxis: Point = translatePoint(
mouseLocationEngine,
this.chartInstance.engine.scale,
this.seriesInstances[0].instance.scale
);
// Solve the nearest data point to the mouse on each series.
const nearestDataPoints: CursorPoint<Series2D>[] = this.seriesInstances.map((el: ISeriesInstance) => {
return el.instance.solveNearestFromScreen(mouseLocationEngine); // on this line most of the elements are undefined, but the data exists and the points are close to each other
});
// console.log(nearestDataPoints);
// Find the nearest solved data point to the mouse.
const nearestPoint: CursorPoint<Series2D> = nearestDataPoints.reduce((prev: CursorPoint<Series2D>, curr: CursorPoint<Series2D>) => {
if (!prev) return curr;
if (!curr) return prev;
if (this.isAppearanceHorizontal) {
return Math.abs(mouseLocationAxis.y - curr.location.y) < Math.abs(mouseLocationAxis.y - prev.location.y) ? curr : prev;
} else {
return Math.abs(mouseLocationAxis.x - curr.location.x) < Math.abs(mouseLocationAxis.x - prev.location.x) ? curr : prev
}
});
if (nearestPoint) {
// Set custom cursor location.
resultTable.setPosition({
x: mouseLocationAxis.x,
y: mouseLocationAxis.y,
});
// Change origin of result table based on cursor location.
let resultTableOrigin;
const yScale: number = this.chartInstance.engine.scale.y.getInnerInterval();
const isResultTableOriginXRight: boolean = mouseLocationEngine.x > this.chartInstance.engine.scale.x.getInnerInterval() / 2;
if (mouseLocationEngine.y > yScale - (yScale / 100 * 30)) { // mouseLocationEngine.y > yScale - 30%
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightTop : UIOrigins.LeftTop;
} else if (mouseLocationEngine.y < yScale / 100 * 30) { // mouseLocationEngine.y < 30% of yScale
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightBottom : UIOrigins.LeftBottom;
} else {
resultTableOrigin = isResultTableOriginXRight ? UIOrigins.RightCenter : UIOrigins.LeftCenter;
}
resultTable.setOrigin(resultTableOrigin);
// Format result table text.
const datetimeValue = this.isAppearanceHorizontal
? this.chartInstance.getDefaultAxisX().formatValue(nearestPoint.location.x)
: this.chartInstance.getDefaultAxisY().formatValue(nearestPoint.location.y)
datetimeRow.setText(`${datetimeValue}`);
rowsY.map((rowY: UITextBox<UIBackground>, i: number) => {
// this.seriesInstances[i].instance.isDisposed() ? rowY.dispose() : rowY.restore(); after this line the table labels have low font contrast
if (nearestDataPoints[i]?.location) {
const foundSeries = chain(this.track.series)
.flatMap()
.value()[i]
const value: string = this.isAppearanceHorizontal
? this.chartInstance.getDefaultAxisY().formatValue(nearestDataPoints[i].location.y)
: this.chartInstance.getDefaultAxisX().formatValue(nearestDataPoints[i].location.x)
rowY.setText(`${this.seriesInstances[i].instance.getName()}: ${value} ${foundSeries.unit}`) // problem on this line
}
});
tick.setValue(
this.isAppearanceHorizontal
? nearestPoint.location.x
: nearestPoint.location.y
);
resultTable.restore();
tick.restore();
} else {
resultTable.dispose();
tick.dispose();
}
});
this.chartInstance.onSeriesBackgroundMouseLeave(() => {
resultTable.dispose();
tick.dispose();
});
this.chartInstance.onSeriesBackgroundMouseDragStart(() => {
resultTable.dispose();
tick.dispose();
});
}
I want to understand why almost the same code doesn't work on a vertically oriented chart but works well on a horizontal one.

How To Crop uploaded image with react-konva

I am using react-konva and I want to crop the selected image when the edit button is clicked.
Can anyone please guide me on how I can achieve this?
This is the Rect I am using to select the portion of the image to crop.
In this code, the onShapeChange function saves the crop value of the image in the canvas editor.
{isCropping && (
<>
{React.createElement(`Rect`, {
ref: cropRef,
key: selectedShape.id,
id: selectedShape.id,
...selectedShape.attributes,
draggable: false,
onTransformEnd: (e) => {
const node = cropRef.current;
const scaleX = node.scaleX();
const scaleY = node.scaleY();
node.scaleX(1);
node.scaleY(1);
const newShape = {
...selectedShape,
attributes:
{
...selectedShape.attributes,
crop: {
x: node.x() - selectedShape.attributes.x,
y: node.y() - selectedShape.attributes.y,
// width: this.state.rect.attrs.width,
// height: this.state.rect.attrs.height
// x: node.x(),
// y: node.y(),
width: Math.max(5, node.width() * scaleX),
height: Math.max(5, node.height() * scaleY),
}
}
}
console.log('newShape in cropper', newShape, 'SelectedShape', selectedShape);
onShapeChange({
id: selectedShape.id,
index: selectedReportItem.index,
reportIndex: selectedReportItem.reportIndex,
newItem: newShape,
})
setIsCropping(false);
}
}, null)}
<Transformer
ref={croptrRef}
rotateEnabled={false}
flipEnabled={false}
boundBoxFunc={(oldBox, newBox) => {
// limit resize
if (newBox.width < 5 || newBox.height < 5) {
return oldBox;
}
return newBox;
}}
/>
</>
)}
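For reference, once those crop attributes are stored, react-konva's Image node accepts a crop prop (interpreted in the source image's pixel space), so applying the saved values could look roughly like the sketch below. This is only an illustration: the useImage hook (from the use-image package) and the shape.attributes.src field are assumptions based on the snippet above.

import React from "react";
import { Image } from "react-konva";
import useImage from "use-image";

// Hypothetical component: renders the selected image using the crop
// attributes produced by the Rect/Transformer above.
const CroppedImage = ({ shape }) => {
  const [image] = useImage(shape.attributes.src); // assumed image source field
  const { x, y, crop } = shape.attributes;
  return (
    <Image
      image={image}
      x={x}
      y={y}
      width={crop.width}
      height={crop.height}
      crop={crop} // Konva reads crop as { x, y, width, height } in image pixels
    />
  );
};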

Rendering components only if they can fit inside a flex container

I am trying to build a toolbar that hides components from the right when there is not enough space to render them. My approach is to use refs, add up the widths, and render based on whether the total width has overflowed. I want to get something working and improve it from there. It seems to work OK when the screen size is decreased, but not when trying to re-render the components once there is room again. I suspect setting a display style of 'none' is causing some of the issues.
componentDidMount() {
this.littleFunction();
window.addEventListener('resize', this.littleFunction);
}
littleFunction = () => {
let sofar = 0;
for (const ref in this.refs) {
sofar += this.refs[ref].offsetWidth;
const index = ref.indexOf('test');
console.log(ref, sofar, this.input.offsetWidth);
if (sofar > this.input.offsetWidth && index === -1) {
this.refs[ref].style.display = 'none';
}
// // console.log(typeof this.refs[ref].style.display, this.refs[ref].style.display);
// if (this.refs[ref] !== this.input) {
// sofar = this.refs[ref].offsetWidth + sofar;
// }
// const index = ref.indexOf('test');
// // console.log(sofar, this.input.offsetWidth, index);
// if (sofar >= this.input.offsetWidth && index === -1) {
// this.refs[ref].style.display = 'none';
//
// this.forceUpdate();
// } else if (sofar < this.input.offsetWidth && index === -1) {
// // console.log('inhiaaa', sofar, this.input.offsetWidth);
// this.refs[ref].style.display = '';
//
// this.forceUpdate();
// }
}
}
After thinking about this for a while, I realized that if I set the style to display: 'none', the next time I run this logic to check how many components can fit, I don't get the width back from the components that were previously set to display: 'none'. What I did was save the widths of the components before calling the function.
componentDidMount() {
this.widths = new List();
for (const ref in this.refs) {
this.widths = this.widths.push(Map({
name: ref,
length: this.refs[ref].offsetWidth,
}));
}
this.littleFunction();
window.addEventListener('resize', this.littleFunction);
}
littleFunction = () => {
let sofar = 0;
this.widths.forEach(item => {
sofar += item.get('length');
const index = item.get('name').indexOf('test');
if (sofar > this.input.offsetWidth && index === -1) {
this.refs[item.get('name')].style.display = 'none';
// this.forceUpdate();
} else if (index === -1) {
this.refs[item.get('name')].style.display = 'inline';
// this.forceUpdate();
}
});
}
Giving the toolbar the style { width: '100%', overflow: 'hidden', whiteSpace: 'nowrap' } should give you the desired effect.
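As a rough illustration of that suggestion (the wrapper markup and item shape here are made up):

// Minimal sketch: let CSS clip overflowing toolbar items instead of
// measuring and hiding them manually.
const Toolbar = ({ items }) => (
  <div style={{ width: "100%", overflow: "hidden", whiteSpace: "nowrap" }}>
    {items.map((item) => (
      <span key={item.id} style={{ display: "inline-block" }}>
        {item.label}
      </span>
    ))}
  </div>
);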

React Virtualized: Collection with cells that have the same fixed height but different widths

I'm a little confused about whether I can use React Virtualized's Collection component to solve my problem. I'll try to describe what I'm doing:
I'm using React Virtualized on a page to display two lists/collections of items. I've finished the first collection which has items that have the same width and height:
The first collection was pretty straightforward and easy to implement.
Now I'm working on the second collection which contains images of varying sizes. I want the cells to have the same height but different widths (depending on the image dimensions of course). The problem is that rows might not always have the same number of cells:
Is this possible to achieve with React Virtualized? If so, how can I determine the position in "cellSizeAndPositionGetter"?
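For reference, Collection's cellSizeAndPositionGetter returns an { x, y, width, height } object for a given cell index, so with a fixed height and known per-cell widths the positions can in principle be precomputed by walking the widths row by row. A rough sketch, where the constants and the cellWidths array are assumptions:

// Hypothetical precomputation for Collection's cellSizeAndPositionGetter:
// fixed row height, variable widths, wrapping when a row is full.
const ROW_HEIGHT = 150; // assumed fixed cell height
const GUTTER = 10; // assumed spacing between cells

const computeCellPositions = (cellWidths, containerWidth) => {
  const positions = [];
  let x = 0;
  let y = 0;
  for (const width of cellWidths) {
    if (x > 0 && x + width > containerWidth) {
      // start a new row when the next cell would overflow
      x = 0;
      y += ROW_HEIGHT + GUTTER;
    }
    positions.push({ x, y, width, height: ROW_HEIGHT });
    x += width + GUTTER;
  }
  return positions;
};

// usage: cellSizeAndPositionGetter={({ index }) => positions[index]}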
I recently used react-virtualized List to display rows of fixed-height, variable-width image cards and it worked great.
My List rowRenderer uses an array of rows of image card elements. That is, an array of arrays of react components, as JSX.
See my final function, cardsRows, for how I build the rows based on element widths and screen width.
Here's how it looks:
Hope this helps!
Some snippets of my code:
import {AutoSizer, List} from 'react-virtualized';
...
updateDimensions() {
this.setState({
screenWidth: window.innerWidth,
});
}
componentDidMount() {
window.addEventListener("resize", this.updateDimensions);
}
componentDidUpdate(prevProps, prevState) {
const props = this.props;
const state = this.state;
if (JSON.stringify(props.imageDocs) !== JSON.stringify(prevProps.imageDocs) || state.screenWidth !== prevState.screenWidth)
this.setState({
cardsRows: cardsRows(props, state.screenWidth),
});
}
rowRenderer({key, index, style, isScrolling}) {
if (!this.state.cardsRows.length)
return '';
return (
<div id={index} title={this.state.cardsRows[index].length} key={key} style={style}>
{this.state.cardsRows[index]}
</div>
);
}
...
render() {
return (
<div style={styles.subMain}>
<AutoSizer>
{({height, width}) => (<List height={height}
rowCount={this.state.cardsRows.length}
rowHeight={164}
rowRenderer={this.rowRenderer}
width={width}
overscanRowCount={2}
/>
)}
</AutoSizer>
</div>
);
}
...
const cardsRows = (props, screenWidth) => {
const rows = [];
let rowCards = [];
let rowWidth = 0;
const distanceBetweenCards = 15;
for (const imageDoc of props.imageDocs) {
const imageWidth = getWidth(imageDoc);
if (rowWidth + distanceBetweenCards * 2 + imageWidth <= screenWidth) {
rowCards.push(cardElement(imageDoc));
rowWidth += distanceBetweenCards + imageWidth;
}
else {
rows.push(rowCards);
// start the next row with the card that did not fit
rowCards = [cardElement(imageDoc)];
rowWidth = distanceBetweenCards + imageWidth;
}
}
if (rowCards.length) {
rows.push(rowCards);
}
return rows;
};
const styles = {
subMain: {
position: 'absolute',
display: 'block',
top: 0,
right: 0,
left: 0,
bottom: 0,
}
};
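The snippets above reference getWidth and cardElement without showing them; purely as an illustration, such helpers might look like this (the imageDoc fields used here are assumptions):

// Hypothetical helpers referenced by cardsRows above.
const CARD_HEIGHT = 164; // matches the rowHeight passed to <List>

// Scale the card width from the image's natural dimensions at a fixed height.
const getWidth = (imageDoc) =>
  Math.round(CARD_HEIGHT * (imageDoc.width / imageDoc.height));

// Render one fixed-height, variable-width image card.
const cardElement = (imageDoc) => (
  <img
    key={imageDoc.id}
    src={imageDoc.url}
    height={CARD_HEIGHT}
    width={getWidth(imageDoc)}
    style={{ marginRight: 15 }}
    alt=""
  />
);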

Rendering from two cameras at the same time in A-Frame

The recent v0.3.0 blog post mentions that WebVR 1.0 support allows "us to have different content on the desktop display than the headset, opening the door for asynchronous gameplay and spectator modes." This is precisely what I'm trying to get working. I'm looking to have one camera in the scene represent the viewpoint of the HMD, and a secondary camera represent a spectator of the same scene, rendering that view to a canvas on the same webpage. 0.3.0 removes the ability to render a-scene to a specific canvas in favor of the embedded component. Any thoughts on how to accomplish two cameras rendering a single scene simultaneously?
My intention is to have the desktop display show what a user is doing from a different perspective. My end goal is to be able to build a mixed reality green screen component.
While there may be a better or cleaner way to do this in the future, I was able to get a second camera rendering by looking at examples of how this is done in the THREE.js world.
I add a component called spectator to a non-active camera. In the init function I set up a new renderer and attach it to a div outside the scene, creating a new canvas. I then call the render method inside the tick() part of the component lifecycle.
I have not worked out how to isolate the movement of this camera yet; the default look-controls of the 0.3.0 A-Frame scene still control both cameras.
Source code:
https://gist.github.com/derickson/334a48eb1f53f6891c59a2c137c180fa
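The gist has the full code; as a minimal sketch of the idea described above (the component name, canvas size, and #spectatorDiv element are placeholders, not taken from the gist), it boils down to something like:

// Rough sketch of the "spectator" approach: a second THREE.WebGLRenderer
// that draws the same scene from a non-active camera on every tick.
AFRAME.registerComponent('spectator', {
  init: function () {
    // Separate renderer attached to a <div id="spectatorDiv"> outside the a-scene.
    this.renderer = new THREE.WebGLRenderer({ antialias: true });
    this.renderer.setSize(640, 360);
    document.querySelector('#spectatorDiv').appendChild(this.renderer.domElement);
  },
  tick: function () {
    // The camera component exposes its THREE camera as the 'camera' object3D.
    const camera = this.el.getObject3D('camera');
    if (camera) {
      this.renderer.render(this.el.sceneEl.object3D, camera);
    }
  }
});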
I've created a set of components that can help with this. https://github.com/diarmidmackenzie/aframe-multi-camera
Here's an example showing usage with A-Frame 1.2.0 to display the main camera on the left half of the screen, and a secondary camera on the right half.
<!DOCTYPE html>
<html>
<head>
<script src="https://aframe.io/releases/1.2.0/aframe.min.js"></script>
<script src="https://cdn.jsdelivr.net/gh/diarmidmackenzie/aframe-multi-camera#latest/src/multi-camera.min.js"></script>
</head>
<body>
<div>
<a-scene>
<a-entity camera look-controls wasd-controls position="0 1.6 0">
<!-- first secondary camera is a child of the main camera, so that it always has the same position / rotation -->
<!-- replace main camera (since main camera is rendered across the whole screen, which we don't want) -->
<a-entity
id="camera1"
secondary-camera="outputElement:#viewport1;sequence: replace"
>
</a-entity>
</a-entity>
<!-- PUT YOUR SCENE CONTENT HERE-->
<!-- position of 2nd secondary camera-->
<a-entity
id="camera2"
secondary-camera="outputElement:#viewport2"
position="8 1.6 -6"
rotation="0 90 0"
>
</a-entity>
</a-scene>
</div>
<!-- standard HTML to control the layout of the two viewports -->
<div style="width: 100%; height:100%; display: flex">
<div id="viewport1" style="width: 50%; height:100%"></div>
<div id="viewport2" style="width: 50%; height:100%"></div>
</div>
</body>
</html>
Also here as a glitch: https://glitch.com/edit/#!/recondite-polar-hyssop
It's also been suggested that I post the entire source code for the multi-camera component here.
Here it is...
/* System that supports capture of the main A-Frame render() call
by add-render-call */
AFRAME.registerSystem('add-render-call', {
init() {
this.render = this.render.bind(this);
this.originalRender = this.el.sceneEl.renderer.render;
this.el.sceneEl.renderer.render = this.render;
this.el.sceneEl.renderer.autoClear = false;
this.preRenderCalls = [];
this.postRenderCalls = [];
this.suppresssDefaultRenderCount = 0;
},
addPreRenderCall(render) {
this.preRenderCalls.push(render)
},
removePreRenderCall(render) {
const index = this.preRenderCalls.indexOf(render);
if (index > -1) {
this.preRenderCalls.splice(index, 1);
}
},
addPostRenderCall(render) {
this.postRenderCalls.push(render)
},
removePostRenderCall(render) {
const index = this.postRenderCalls.indexOf(render);
if (index > -1) {
this.postRenderCalls.splice(index, 1);
}
else {
console.warn("Unexpected failure to remove render call")
}
},
suppressOriginalRender() {
this.suppresssDefaultRenderCount++;
},
unsuppressOriginalRender() {
this.suppresssDefaultRenderCount--;
if (this.suppresssDefaultRenderCount < 0) {
console.warn("Unexpected unsuppression of original render")
this.suppresssDefaultRenderCount = 0;
}
},
render(scene, camera) {
const renderer = this.el.sceneEl.renderer;
// set up THREE.js stats to correctly count across all render calls.
renderer.info.autoReset = false;
renderer.info.reset();
this.preRenderCalls.forEach((f) => f());
if (this.suppresssDefaultRenderCount <= 0) {
this.originalRender.call(renderer, scene, camera)
}
this.postRenderCalls.forEach((f) => f());
}
});
/* Component that captures the main A-Frame render() call
and adds an additional render call.
Must specify an entity and component that expose a function call render(). */
AFRAME.registerComponent('add-render-call', {
multiple: true,
schema: {
entity: {type: 'selector'},
componentName: {type: 'string'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'}
},
init() {
this.invokeRender = this.invokeRender.bind(this);
},
update(oldData) {
// first clean up any old settings.
this.removeSettings(oldData)
// now add new settings.
if (this.data.sequence === "before") {
this.system.addPreRenderCall(this.invokeRender)
}
if (this.data.sequence === "replace") {
this.system.suppressOriginalRender()
}
if (this.data.sequence === "after" ||
this.data.sequence === "replace")
{
this.system.addPostRenderCall(this.invokeRender)
}
},
remove() {
this.removeSettings(this.data)
},
removeSettings(data) {
if (data.sequence === "before") {
this.system.removePreRenderCall(this.invokeRender)
}
if (data.sequence === "replace") {
this.system.unsuppressOriginalRender()
}
if (data.sequence === "after" ||
data.sequence === "replace")
{
this.system.removePostRenderCall(this.invokeRender)
}
},
invokeRender()
{
const componentName = this.data.componentName;
if ((this.data.entity) &&
(this.data.entity.components[componentName])) {
this.data.entity.components[componentName].render(this.el.sceneEl.renderer, this.system.originalRender);
}
}
});
/* Component to set layers via HTML attribute. */
AFRAME.registerComponent('layers', {
schema : {type: 'number', default: 0},
init: function() {
setObjectLayer = function(object, layer) {
if (!object.el ||
!object.el.hasAttribute('keep-default-layer')) {
object.layers.set(layer);
}
object.children.forEach(o => setObjectLayer(o, layer));
}
this.el.addEventListener("loaded", () => {
setObjectLayer(this.el.object3D, this.data);
});
if (this.el.hasAttribute('text')) {
this.el.addEventListener("textfontset", () => {
setObjectLayer(this.el.object3D, this.data);
});
}
}
});
/* This component has code in common with viewpoint-selector-renderer
However it's a completely generic stripped-down version, which
just delivers the 2nd camera function.
i.e. it is missing:
- The positioning of the viewpoint-selector entity.
- The cursor / raycaster elements.
*/
AFRAME.registerComponent('secondary-camera', {
schema: {
output: {type: 'string', oneOf: ['screen', 'plane'], default: 'screen'},
outputElement: {type: 'selector'},
cameraType: {type: 'string', oneOf: ['perspective', 'orthographic'], default: 'perspective'},
sequence: {type: 'string', oneOf: ['before', 'after', 'replace'], default: 'after'},
quality: {type: 'string', oneOf: ['high', 'low'], default: 'high'}
},
init() {
if (!this.el.id) {
console.error("No id specified on entity. secondary-camera only works on entities with an id")
}
this.savedViewport = new THREE.Vector4();
this.sceneInfo = this.prepareScene();
this.activeRenderTarget = 0;
// add the render call to the scene
this.el.sceneEl.setAttribute(`add-render-call__${this.el.id}`,
{entity: `#${this.el.id}`,
componentName: "secondary-camera",
sequence: this.data.sequence});
// if there is a cursor on this entity, set it up to read this camera.
if (this.el.hasAttribute('cursor')) {
this.el.setAttribute("cursor", "canvas: user; camera: user");
this.el.addEventListener('loaded', () => {
this.el.components['raycaster'].raycaster.layers.mask = this.el.object3D.layers.mask;
const cursor = this.el.components['cursor'];
cursor.removeEventListeners();
cursor.camera = this.camera;
cursor.canvas = this.data.outputElement;
cursor.canvasBounds = cursor.canvas.getBoundingClientRect();
cursor.addEventListeners();
cursor.updateMouseEventListeners();
});
}
if (this.data.output === 'plane') {
if (!this.data.outputElement.hasLoaded) {
this.data.outputElement.addEventListener("loaded", () => {
this.configureCameraToPlane()
});
} else {
this.configureCameraToPlane()
}
}
},
configureCameraToPlane() {
const object = this.data.outputElement.getObject3D('mesh');
function nearestPowerOf2(n) {
return 1 << 31 - Math.clz32(n);
}
// 2 * nearest power of 2 gives a nice look, but at a perf cost.
const factor = (this.data.quality === 'high') ? 2 : 1;
const width = factor * nearestPowerOf2(window.innerWidth * window.devicePixelRatio);
const height = factor * nearestPowerOf2(window.innerHeight * window.devicePixelRatio);
function newRenderTarget() {
const target = new THREE.WebGLRenderTarget(width,
height,
{
minFilter: THREE.LinearFilter,
magFilter: THREE.LinearFilter,
stencilBuffer: false,
generateMipmaps: false
});
return target;
}
// We use 2 render targets, and alternate each frame, so that we are
// never rendering to a target that is actually in front of the camera.
this.renderTargets = [newRenderTarget(),
newRenderTarget()]
this.camera.aspect = object.geometry.parameters.width /
object.geometry.parameters.height;
},
remove() {
this.el.sceneEl.removeAttribute(`add-render-call__${this.el.id}`);
if (this.renderTargets) {
this.renderTargets[0].dispose();
this.renderTargets[1].dispose();
}
// "Remove" code does not tidy up adjustments made to cursor component.
// rarely necessary as cursor is typically put in place at the same time
// as the secondary camera, and so will be disposed of at the same time.
},
prepareScene() {
this.scene = this.el.sceneEl.object3D;
const width = 2;
const height = 2;
if (this.data.cameraType === "orthographic") {
this.camera = new THREE.OrthographicCamera( width / - 2, width / 2, height / 2, height / - 2, 1, 1000 );
}
else {
this.camera = new THREE.PerspectiveCamera( 45, width / height, 1, 1000);
}
this.scene.add(this.camera);
return;
},
render(renderer, renderFunction) {
// don't bother rendering to screen in VR mode.
if (this.data.output === "screen" && this.el.sceneEl.is('vr-mode')) return;
var elemRect;
if (this.data.output === "screen") {
const elem = this.data.outputElement;
// get the viewport relative position of this element
elemRect = elem.getBoundingClientRect();
this.camera.aspect = elemRect.width / elemRect.height;
}
// Camera position & layers match this entity.
this.el.object3D.getWorldPosition(this.camera.position);
this.el.object3D.getWorldQuaternion(this.camera.quaternion);
this.camera.layers.mask = this.el.object3D.layers.mask;
this.camera.updateProjectionMatrix();
if (this.data.output === "screen") {
// "bottom" position is relative to the whole viewport, not just the canvas.
// We need to turn this into a distance from the bottom of the canvas.
// We need to consider the header bar above the canvas, and the size of the canvas.
const mainRect = renderer.domElement.getBoundingClientRect();
renderer.getViewport(this.savedViewport);
renderer.setViewport(elemRect.left - mainRect.left,
mainRect.bottom - elemRect.bottom,
elemRect.width,
elemRect.height);
renderFunction.call(renderer, this.scene, this.camera);
renderer.setViewport(this.savedViewport);
}
else {
// target === "plane"
// store off current renderer properties so that they can be restored.
const currentRenderTarget = renderer.getRenderTarget();
const currentXrEnabled = renderer.xr.enabled;
const currentShadowAutoUpdate = renderer.shadowMap.autoUpdate;
// temporarily override renderer properties for rendering to a texture.
renderer.xr.enabled = false; // Avoid camera modification
renderer.shadowMap.autoUpdate = false; // Avoid re-computing shadows
const renderTarget = this.renderTargets[this.activeRenderTarget];
renderTarget.texture.encoding = renderer.outputEncoding;
renderer.setRenderTarget(renderTarget);
renderer.state.buffers.depth.setMask( true ); // make sure the depth buffer is writable so it can be properly cleared, see #18897
renderer.clear();
renderFunction.call(renderer, this.scene, this.camera);
this.data.outputElement.getObject3D('mesh').material.map = renderTarget.texture;
// restore original renderer settings.
renderer.setRenderTarget(currentRenderTarget);
renderer.xr.enabled = currentXrEnabled;
renderer.shadowMap.autoUpdate = currentShadowAutoUpdate;
this.activeRenderTarget = 1 - this.activeRenderTarget;
}
}
});
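For completeness, the secondary-camera schema above also supports output: plane, which renders the camera's view onto an element's texture instead of a screen viewport. A rough usage sketch (the ids and positions here are arbitrary):

<!-- Hypothetical usage: render the secondary camera's view onto a plane's texture -->
<a-plane id="monitor" position="0 1.5 -3" width="1.6" height="0.9"></a-plane>
<a-entity
  id="camera3"
  secondary-camera="output: plane; outputElement: #monitor"
  position="0 1.6 4"
></a-entity>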
