setVertexCount API of QGeometryRenderer and its effect on ray casting results - qt

I create a wireframe mesh of two lines between three points using the following functions:
Qt3DRender::QGeometryRenderer *Utils::createWireframeMesh()
{
    Qt3DRender::QGeometryRenderer *mesh = new Qt3DRender::QGeometryRenderer();
    Qt3DRender::QGeometry *geometry = new Qt3DRender::QGeometry(mesh);
    Qt3DRender::QBuffer *vertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, geometry);
    Qt3DRender::QBuffer *indexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::IndexBuffer, geometry);

    QByteArray vertexBufferData;
    QByteArray indexBufferData;

    int vertexCount = 3; // Three vertices at (0, -1, 0) and (1, 0, 0) and (0, 1, 0)
    int lineCount = 2;   // Two lines between three vertices

    vertexBufferData.resize(vertexCount * 3 * sizeof(float));
    indexBufferData.resize(lineCount * 2 * sizeof(ushort));

    // Arrow triangle is 2D and is inside XY plane
    float *vPtr = reinterpret_cast<float *>(vertexBufferData.data());
    vPtr[0] = 0.0f; vPtr[1] = -1.0f; vPtr[2] = 0.0f; // First vertex at (0, -1, 0)
    vPtr[3] = 1.0f; vPtr[4] = 0.0f;  vPtr[5] = 0.0f; // Second vertex at (1, 0, 0)
    vPtr[6] = 0.0f; vPtr[7] = +1.0f; vPtr[8] = 0.0f; // Third vertex at (0, 1, 0)

    ushort *iPtr = reinterpret_cast<ushort *>(indexBufferData.data());
    iPtr[0] = 0; iPtr[1] = 1; // First line from index 0 to index 1
    iPtr[2] = 1; iPtr[3] = 2; // Second line from index 1 to index 2

    vertexDataBuffer->setData(vertexBufferData);
    indexDataBuffer->setData(indexBufferData);

    addPositionAttributeToGeometry(geometry, vertexDataBuffer, vertexCount);
    addIndexAttributeToGeometry(geometry, indexDataBuffer, lineCount * 2);

    mesh->setInstanceCount(1);
    mesh->setIndexOffset(0);
    mesh->setFirstInstance(0);
    // How to set vertex count here?
    mesh->setVertexCount(vertexCount);
    mesh->setPrimitiveType(Qt3DRender::QGeometryRenderer::Lines);
    mesh->setGeometry(geometry);

    return mesh;
}
void Utils::addPositionAttributeToGeometry(Qt3DRender::QGeometry *geometry,
                                           Qt3DRender::QBuffer *buffer, int count)
{
    Qt3DRender::QAttribute *posAttribute = new Qt3DRender::QAttribute();
    posAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
    posAttribute->setBuffer(buffer);
    posAttribute->setDataType(Qt3DRender::QAttribute::Float);
    posAttribute->setDataSize(3);
    posAttribute->setByteOffset(0);
    posAttribute->setByteStride(0);
    posAttribute->setCount(count);
    posAttribute->setName(Qt3DRender::QAttribute::defaultPositionAttributeName());
    geometry->addAttribute(posAttribute);
}

void Utils::addIndexAttributeToGeometry(Qt3DRender::QGeometry *geometry,
                                        Qt3DRender::QBuffer *buffer, int count)
{
    Qt3DRender::QAttribute *indexAttribute = new Qt3DRender::QAttribute();
    indexAttribute->setAttributeType(Qt3DRender::QAttribute::IndexAttribute);
    indexAttribute->setBuffer(buffer);
    indexAttribute->setDataType(Qt3DRender::QAttribute::UnsignedShort);
    indexAttribute->setDataSize(1);
    indexAttribute->setByteOffset(0);
    indexAttribute->setByteStride(0);
    indexAttribute->setCount(count);
    geometry->addAttribute(indexAttribute);
}
In the above code, I tried three different statements at this line:
// How to set vertex count here?
mesh->setVertexCount(vertexCount);
mesh->setVertexCount(vertexCount * 2);
mesh->setVertexCount(vertexCount * 3);
The results differ, and the ray casting I do in my 3D scene is surprisingly affected too.
The documentation explains the vertexCount property of Qt3DRender::QGeometryRenderer as:
vertexCount : int
Holds the primitive count.
In my case the primitive count is the line count, so I tried that, but only one line is drawn.
I'm confused about the setVertexCount API. Can anybody give me a hint?

vertexCount is the same value that you would pass to glDrawArrays or glDrawElements, i.e. it's the number of vertices involved in the drawing. Since you're using indexed rendering, that would typically be the number of indexes (assuming you're drawing all the data in the index array). So in the case above, it should be 4.
Please note we recently fixed a bug with line picking when using primitive restart, but that doesn't affect the code you included above.
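In other words, for the question's code the call would look like the sketch below (my restatement of this answer, reusing the lineCount variable from above):

// 2 lines * 2 indices per line = 4 indices drawn
mesh->setVertexCount(lineCount * 2);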

Related

Unable to update 2D Vector element value using their index location

vector<vector<int>> arr{
{1,2},
{2,3},
{4,5},
{1,5}
};
vector<vector<int>> adj( m , vector<int> (m, 0));
for (int i = 0; i < arr.size(); i++) {
// Find X and Y of Edges
int x = arr[i][0];
int y = arr[i][1];
// Update value to 1
adj[x][y] = 1;
adj[y][x] = 1;
}
I am trying to update the values of the vector of vectors using the above code, but I am getting a segmentation fault. How can I change the value of an element of the 2D vector at a particular location?
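For context, here is a minimal sketch of one way the out-of-range access can be avoided (my own illustration, not part of the original post; it assumes m was meant to be the matrix dimension). The adjacency matrix must be at least (largest vertex label + 1) on each side, otherwise adj[x][y] writes past the end and the behaviour is undefined:

#include <algorithm>
#include <vector>
using namespace std;

int main() {
    vector<vector<int>> arr{ {1, 2}, {2, 3}, {4, 5}, {1, 5} };

    // Size the matrix from the largest vertex label so adj[x][y] is always in range
    int maxLabel = 0;
    for (const auto &e : arr) maxLabel = max({maxLabel, e[0], e[1]});
    vector<vector<int>> adj(maxLabel + 1, vector<int>(maxLabel + 1, 0));

    for (const auto &e : arr) {
        adj[e[0]][e[1]] = 1;
        adj[e[1]][e[0]] = 1;
    }
    return 0;
}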

How to pass a Pointer member inside a class

I have a callback function (for a ROS subscriber of type PointCloud2 msg).
I want to be able to use a different pointer member inside the callback function, because I saw that when the function is called twice there is a problem with the values of the variables that use this pointer.
This is part of my code:
class Planner {
public:
char frame_id[10] = "os1_lidar";
int num_area = 1;
int i = 0; // for create the circular path
float right_point_y = 0;
float left_point_y = 0;
float y_distance = 0; //the distance between the trees in the Y axis
float center_point_y = 0; //the center point between the trees in the y axis after average calculation
float right_center_point_x = 0; //the center point of the right tree in the x axis
float left_center_point_x = 0; //the center point of the left tree in the x axis
float max_center_point_y = 1.4; //the max distance in the Y axis for sending the steering angle to the Arduino
//setting the two side area of the trees to filter the point cloud
float minX_r = -5, minY_r = 2, minZ_r = -0.4;
float maxX_r = -2, maxY_r = 4, maxZ_r = -0.1;
float minX_l = -5, minY_l = -4, minZ_l = -0.4;
float maxX_l = -2, maxY_l = -2, maxZ_l = 0.1;
int red, green, blue = 0; //for changing the boxes' color outside the lines
bool publish_points = true;
bool publish_markers = true;
ros::Publisher point_cloud_pub;
ros::Publisher center_point_marker_pub;
ros::Publisher right_point_marker_pub;
ros::Publisher left_point_marker_pub;
ros::Publisher marker_area_right_pub;
ros::Publisher marker_area_left_pub;
void Point_Filtering(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud_msg);
};
void Planner::Point_Filtering(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud_msg) {
ROS_INFO_STREAM_ONCE("Getting Data from Lidar");
ROS_INFO_STREAM_ONCE("size " << cloud_msg->size());
ROS_INFO_STREAM_ONCE("width " << cloud_msg->width);
ROS_INFO_STREAM_ONCE("height " << cloud_msg->height);
// Right Line
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered_r(new pcl::PointCloud<pcl::PointXYZ>);
//Left Line
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered_l(new pcl::PointCloud<pcl::PointXYZ>);
//Right Trees Box Filter
pcl::CropBox<pcl::PointXYZ> boxFilter_r;
boxFilter_r.setInputCloud(cloud_msg);
boxFilter_r.setMin(Eigen::Vector4f(minX_r, minY_r, minZ_r, 1.0));
boxFilter_r.setMax(Eigen::Vector4f(maxX_r, maxY_r, maxZ_r, 1.0));
boxFilter_r.setNegative(false);
boxFilter_r.setTranslation(Eigen::Vector3f(0.0, 0.0, 0.0));
boxFilter_r.filter(*cloud_filtered_r);
//Left Trees Box Filter
pcl::CropBox<pcl::PointXYZ> boxFilter_l;
boxFilter_l.setInputCloud(cloud_msg);
boxFilter_l.setMin(Eigen::Vector4f(minX_l, minY_l, minZ_l, 1.0));
boxFilter_l.setMax(Eigen::Vector4f(maxX_l, maxY_l, maxZ_l, 1.0));
boxFilter_l.setNegative(false);
boxFilter_l.setTranslation(Eigen::Vector3f(0.0, 0.0, 0.0));
boxFilter_l.filter(*cloud_filtered_l);
//Right Trees (Y axis)
Eigen::Vector4f centroid_r_y;
pcl::compute3DCentroid(*cloud_filtered_r, centroid_r_y);
right_point_y = centroid_r_y[1];
//Left Trees (Y axis)
Eigen::Vector4f centroid_l_y;
pcl::compute3DCentroid(*cloud_filtered_l, centroid_l_y);
left_point_y = centroid_l_y[1];
y_distance = abs(centroid_l_y[1]) + abs(centroid_r_y[1]);
center_point_y = (abs(centroid_l_y[1]) - abs(centroid_r_y[1])) / 2;
//Right Trees (X axis)
Eigen::Vector4f centroid_r_x;
pcl::compute3DCentroid(*cloud_filtered_r, centroid_r_x);
right_center_point_x = centroid_r_x[0];
// ROS_INFO_STREAM("right_point_x : "<< num_area << " :" << right_center_point_x);
//Left Trees (X axis)
Eigen::Vector4f centroid_l_x;
pcl::compute3DCentroid(*cloud_filtered_l, centroid_l_x);
left_center_point_x = centroid_l_x[0];
}
int main(int argc, char **argv) {
ROS_INFO_STREAM_ONCE("Running");
// Initialize ROS
ros::init(argc, argv, "pcl_filter_node");
ros::NodeHandle nh;
//region 1st
Planner planner;
ros::Subscriber sub = nh.subscribe<pcl::PointCloud<pcl::PointXYZ> >("/fused_points", 1,
&Planner::Point_Filtering, &planner);
planner.num_area = 1;
planner.minX_r = -3;
planner.minX_l = -3;
planner.maxX_r = 0;
planner.maxX_l = 0;
planner.point_cloud_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZ> >("pcl_filtered", 1);
planner.center_point_marker_pub = nh.advertise<visualization_msgs::Marker>("center_marker",
1);
planner.right_point_marker_pub = nh.advertise<visualization_msgs::Marker>("right_marker",
1);
planner.left_point_marker_pub = nh.advertise<visualization_msgs::Marker>("left_x_marker",
1);
planner.marker_area_right_pub = nh.advertise<visualization_msgs::Marker>("right_box_marker",
1);
planner.marker_area_left_pub = nh.advertise<visualization_msgs::Marker>("left_box_marker",
1);
//endregion
//region 2nd
Planner planner_2;
planner_2.num_area = 2;
planner_2.minX_r = -8 ;
planner_2.minX_l = -8;
planner_2.maxX_r = -5;
planner_2.maxX_l = -5;
planner_2.publish_points = false;
planner_2.point_cloud_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZ> >("pcl_filtered_2",
1);
planner_2.center_point_marker_pub = nh.advertise<visualization_msgs::Marker>
("center_marker_2", 1);
planner_2.right_point_marker_pub = nh.advertise<visualization_msgs::Marker>
("right_marker_2", 1);
planner_2.left_point_marker_pub = nh.advertise<visualization_msgs::Marker>
("left_x_marker_2", 1);
planner_2.marker_area_right_pub = nh.advertise<visualization_msgs::Marker>
("right_box_marker_2", 1);
planner_2.marker_area_left_pub = nh.advertise<visualization_msgs::Marker>
("left_box_marker_2", 1);
ros::Subscriber sub2 = nh.subscribe<pcl::PointCloud<pcl::PointXYZ> >("/fused_points", 1,
&Planner::Point_Filtering, &planner_2);
//endregion
ros::Rate r(15);
ros::Time test_time;
while (nh.ok()) {
ros::spinOnce();
r.sleep();
}
}
This code takes the original point cloud, filters it to the relevant areas, and then calculates the centroids of those filtered point clouds.
I want to take this line:
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered_r(new pcl::PointCloud<pcl::PointXYZ>);
and put it in the public part of the class, so that I will be able to change the name of the filtered points.
When I subscribe to the same msg twice, the centroid of the second area is equal to the first, which is wrong.
Thanks for your help.
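For reference, a minimal sketch of what I think moving that pointer into the class could look like (this is only an illustration of the question, not code from the post; the member names and the reset-per-callback pattern are my assumptions):

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

class Planner {
public:
    // Hypothetical members: each Planner instance owns its own filtered clouds
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered_r;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered_l;

    void Point_Filtering(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud_msg) {
        // Allocate fresh clouds on every callback so two instances never share data
        cloud_filtered_r.reset(new pcl::PointCloud<pcl::PointXYZ>);
        cloud_filtered_l.reset(new pcl::PointCloud<pcl::PointXYZ>);
        // ... same box-filter and centroid code as above, writing into the members ...
    }
};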

Receiving denormalized output texture coordinates in Frag shader

Update
See rationale at the end of my question below
Using WebGL2 I can access a texel by its denormalized coordinates (sorry, I don't know the right lingo for this). That means I don't have to scale them down to 0-1 like I do in texture2D().
However, the input to the fragment shader is still the vec2/vec3 of normalized values.
Is there a way to declare in/out variables in the vertex and fragment shaders so that I don't have to scale the coordinates?
somewhere in vertex shader:
...
out vec2 TextureCoordinates;
somewhere in frag shader:
...
in vec2 TextureCoordinates;
I would like for TextureCoordinates to be ivec2 and already scaled.
This question, like all my other questions on WebGL, relates to general-purpose computing using WebGL. We are trying to do tensor (multi-D matrix) operations using WebGL.
We map our data in a few ways to a Texture. The simplest approach we follow is -- assuming we can access our data as a flat array -- to lay it out along the texture's width and go up the texture's height until we're done.
Since our thinking, logic, and calculations are all based on tensor/matrix indices -- inside the fragment shader -- we'd have to map back to/from the X-Y texture coordinates to indices. The intermediate step here is to calculate an offset for a given position of a texel. Then from that offset we can calculate the matrix indices from its strides.
Calculating an offset in WebGL 1 for very large textures seems to take much longer than in WebGL 2 using integer coordinates. See below:
WebGL 1 offset calculation
int coordsToOffset(vec2 coords, int width, int height) {
float s = coords.s * float(width);
float t = coords.t * float(height);
int offset = int(t) * width + int(s);
return offset;
}
vec2 offsetToCoords(int offset, int width, int height) {
int t = offset / width;
int s = offset - t*width;
vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height);
return coords;
}
WebGL 2 offset calculation in the presence of int coords
int coordsToOffset(ivec2 coords, int width) {
return coords.t * width + coords.s;
}
ivec2 offsetToCoords(int offset, int width) {
int t = offset / width;
int s = offset - t*width;
return ivec2(s,t);
}
It should be clear that for a series of large texture operations we're saving hundreds of thousands of operations just on the offset/coords calculation.
It's not clear why you want to do what you're trying to do. It would be better to ask something like "I'm trying to draw an image / implement post-processing glow / do ray tracing / ... and to do that I want to use un-normalized texture coordinates because ..." and then we can tell you if your solution is going to work and how to solve it.
In any case, passing int or unsigned int or ivec2/3/4 or uvec2/3/4 as a varying is supported but not interpolation. You have to declare them as flat.
Still, you can pass un-normalized values as float or vec2/3/4 and then convert to int or ivec2/3/4 in the fragment shader.
The other issue is you'll get no sampling using texelFetch, the function that takes texel coordinates instead of normalized texture coordinates. It just returns the exact value of a single pixel. It does not support filtering like the normal texture function.
Example:
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need webgl2");
}
const vs = `#version 300 es
in vec4 position;
in ivec2 texelcoord;
out vec2 v_texcoord;
void main() {
v_texcoord = vec2(texelcoord);
gl_Position = position;
}
`;
const fs = `#version 300 es
precision mediump float;
in vec2 v_texcoord;
out vec4 outColor;
uniform sampler2D tex;
void main() {
outColor = texelFetch(tex, ivec2(v_texcoord), 0);
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create buffers via gl.createBuffer, gl.bindBuffer, gl.bufferData)
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-.5, -.5,
.5, -.5,
0, .5,
],
},
texelcoord: {
numComponents: 2,
data: new Int32Array([
0, 0,
15, 0,
8, 15,
]),
}
});
// make a 16x16 texture
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 16;
ctx.canvas.height = 16;
for (let i = 23; i > 0; --i) {
ctx.fillStyle = `hsl(${i / 23 * 360 | 0}, 100%, ${i % 2 ? 25 : 75}%)`;
ctx.beginPath();
ctx.arc(8, 15, i, 0, Math.PI * 2, false);
ctx.fill();
}
const tex = twgl.createTexture(gl, { src: ctx.canvas });
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// no need to set uniforms since they default to 0
// and only one texture which is already on texture unit 0
gl.drawArrays(gl.TRIANGLES, 0, 3);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
So in response to your updated question it's still not clear what you want to do. Why do you want to pass varyings to the fragment shader? Can't you just do whatever math you want in the fragment shader itself?
Example:
uniform sampler2D tex;
out float result;
// sum all the values in the texture
vec4 sum4 = vec4(0);
ivec2 texDim = textureSize(tex, 0);
for (int y = 0; y < texDim.y; ++y) {
for (int x = 0; x < texDim.x; ++x) {
sum4 += texelFetch(tex, ivec2(x, y), 0);
}
}
result = sum4.x + sum4.y + sum4.z + sum4.w;
Example2
uniform isampler2D indices;
uniform sampler2D data;
out float result;
// sum only the values in data pointed to by indices
vec4 sum4 = vec4(0);
ivec2 texDim = textureSize(indices, 0);
for (int y = 0; y < texDim.y; ++y) {
for (int x = 0; x < texDim.x; ++x) {
ivec2 index = texelFetch(indices, ivec2(x, y), 0).xy;
sum4 += texelFetch(data, index, 0);
}
}
result = sum4.x + sum4.y + sum4.z + sum4.w;
Note that I'm also not an expert in GPGPU but I have a hunch the code above is not the fastest way because I believe parallelization happens based on output. The code above has only 1 output, so no parallelization? It would be easy to change it so that it takes a block ID, tile ID, or area ID as input and computes just the sum for that area. Then you'd write out a larger texture with the sum of each block and finally sum the block sums.
Also, dependent and non-uniform texture reads are a known perf issue. The first example reads the texture in order. That's cache friendly. The second example reads the texture in a random order (specified by indices), which is not cache friendly.

(JavaFX) - Snake Iteration 2D Matrix at Snakes and Ladders game

I am creating the game "Snakes And Ladders". I am using a GridPane to represent the game board and obviously I want to move through the board in a "snake" way. Just like that: http://prntscr.com/k5lcaq .
When the dice is rolled I want to move 'dice_num' moves forward from my current position, so I am calculating the new index using a 1D array and converting this index to 2D coordinates (reverse row-major order).
gameGrid.add(pieceImage, newIndex % ROWS, newIndex / ROWS);
Where gameGrid is the ID of my grid pane, newIndex % ROWS represents the column coordinate and newIndex / ROWS the row coordinate.
PROBLEM 1: The grid pane is iterating in its own way. Just like that: https://prnt.sc/k5lhjx.
Obviously when the 2D array reaches coordinates [0,9], the next position is [1,0], but what I actually want as the next position is [1,9] (going from 91 to 90).
PROBLEM 2: I want to start counting from the bottom of the grid pane (from number 1, see screenshots) and go all the way up to 100. But how am I supposed to reverse-iterate through a 2D array?
You can easily turn the coordinate system upside down with the following conversion:
y' = maxY - y
To get the "snake order", you simply need to check, if the row the index difference is odd or even. For even cases increasing the index should increase the x coordinate
x' = x
for odd cases you need to apply a transformation similar to the y transformation above
x' = xMax - x
The following methods allow you to convert between (x, y) and 1D-index. Note that the index is 0-based:
private static final int ROWS = 10;
private static final int COLUMNS = 10;
public static int getIndex(int column, int row) {
int offsetY = ROWS - 1 - row;
int offsetX = ((offsetY & 1) == 0) ? column : COLUMNS - 1 - column;
return offsetY * COLUMNS + offsetX;
}
public static int[] getPosition(int index) {
int offsetY = index / COLUMNS;
int dx = index % COLUMNS;
int offsetX = ((offsetY & 1) == 0) ? dx : COLUMNS - 1 - dx;
return new int[] { offsetX, ROWS - 1 - offsetY };
}
for (int y = 0; y < ROWS; y++) {
for (int x = 0; x < COLUMNS; x++) {
System.out.print('\t' + Integer.toString(getIndex(x, y)));
}
System.out.println();
}
System.out.println();
for (int j = 0; j < COLUMNS * ROWS; j++) {
int[] pos = getPosition(j);
System.out.format("%d: (%d, %d)\n", j, pos[0], pos[1]);
}
This should allow you to easily modify the position:
int[] nextPos = getPosition(steps + getIndex(currentX, currentY));
int nextX = nextPos[0];
int nextY = nextPos[1];

Finding the center of the diameter of a graphtree using BFS?

So this function, biggest_dist, finds the diameter of a graph (the given graph in the task is always a tree).
What I want it to find instead is the center of the diameter: the node with the least maximum distance to all the other nodes.
I "kinda" understand the idea that we can do this by finding the path from u to t (the distance between u and t is the diameter) by keeping track of the parent for each node. From there I choose the node in the middle of u and t? My question is how do I implement that for this function here? Will this make it output node 2 for this graph?
int biggest_dist(int n, int v, const vector< vector<int> >& graph)
//n are amount of nodes, v is an arbitrary vertex
{ //This function finds the diameter of the graph
int INF = 2 * graph.size(); // Bigger than any other length
vector<int> dist(n, INF);
dist[v] = 0;
queue<int> next;
next.push(v);
int bdist = 0; //biggest distance
while (!next.empty()) {
int pos = next.front();
next.pop();
bdist = dist[pos];
for (int i = 0; i < graph[pos].size(); ++i) {
int nghbr = graph[pos][i];
if (dist[nghbr] > dist[pos] + 1) {
dist[nghbr] = dist[pos] + 1;
next.push(nghbr);
}
}
}
return bdist;
}
As a matter of fact, this function does not compute the diameter. It computes the distance to the furthest vertex from a given vertex v.
To compute the diameter of a tree, you first need to choose an arbitrary vertex (let's say v), then find the vertex that is furthest away from v (let's say w), and then find a vertex that is furthest away from w, let's say u. The distance between w and u is the diameter of the tree, but the distance between v and w (what your function is computing) is not guaranteed to be the diameter.
To make your function compute the diameter, you will need to make it return the vertex it found along with the distance. Conveniently, it will always be the last element you process, so just make your function remember the last element it processed along with the distance to that element, and return them both. Then call your function twice, first from an arbitrary vertex, then from the vertex that the first call returned.
To make it actually find the center, you can also remember the parent for each node during your BFS. To do so, allocate an extra array, say prev, and when you do
dist[nghbr] = dist[pos] + 1;
also do
prev[nghbr] = pos;
Then at the end of the second call to the function, you can just descend bdist/2 times into the prev, something like:
center = lastVertex;
for (int i = 0; i + i < bdist; ++ i) center = prev[center];
So with a few tweaks to your function (making it return the furthest vertex from v and a vertex that is in the middle of that path, and not return the diameter at all), this code is likely to return the center of the tree (I only tested it on your example, so it might have some off-by-one errors):
pair<int, int> biggest_dist(int n, int v, const vector< vector<int> >& graph)
{
int INF = 2 * graph.size(); // Bigger than any other length
vector<int> dist(n, INF);
vector<int> prev(n, INF);
dist[v] = 0;
queue<int> next;
next.push(v);
int bdist = 0; //biggest distance
int lastV = v;
while (!next.empty()) {
int pos = next.front();
next.pop();
bdist = dist[pos];
lastV = pos;
for (int i = 0; i < graph[pos].size(); ++i) {
int nghbr = graph[pos][i];
if (dist[nghbr] > dist[pos] + 1) {
dist[nghbr] = dist[pos] + 1;
prev[nghbr] = pos;
next.push(nghbr);
}
}
}
int center = lastV;
for (int i = 0; i + i < bdist; ++ i) center = prev[center];
return make_pair(lastV, center);
}
int getCenter(int n, const vector< vector<int> >& graph)
{
// first call is to get the vertex that is furthest away from vertex 0, where 0 is just an arbitrary vertex
pair<int, int> firstResult = biggest_dist(n, 0, graph);
// second call is to find the vertex that is furthest away from the vertex just found
pair<int, int> secondResult = biggest_dist(n, firstResult.first, graph);
return secondResult.second;
}
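For completeness, a minimal usage sketch of the functions above (my own example, not from the original answer; the path-shaped tree is made up and uses the same 0-indexed adjacency-list representation as the question):

#include <iostream>
#include <queue>
#include <utility>
#include <vector>
using namespace std;

// biggest_dist() and getCenter() exactly as defined above

int main() {
    // A small path-shaped tree 0 - 1 - 2 - 3 - 4, whose center is vertex 2
    int n = 5;
    vector<vector<int>> graph(n);
    auto addEdge = [&](int a, int b) { graph[a].push_back(b); graph[b].push_back(a); };
    addEdge(0, 1); addEdge(1, 2); addEdge(2, 3); addEdge(3, 4);

    cout << "center: " << getCenter(n, graph) << "\n"; // prints 2
}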
