How can I solve the travelling salesman problem using Rust and petgraph?

I have a parser from my raw input to a petgraph::UnGraph structure. I need to find the shortest path that visits all nodes. I found algo::dijkstra, but from what I understood, Dijkstra would only give me the shortest path connecting two specific nodes.
Is there a function in the petgraph library that offers a way to solve the travelling salesman problem easily, or will I need to implement a solver myself? I browsed the documentation but couldn't find anything; then again, maybe it's just my limited experience with graph algorithms.

I've been playing with petgraph for a little while and took your question as a challenge.
I find petgraph very powerful, but like many complex systems it is hard to understand and the documentation doesn't give enough examples.
For example, what is the difference between an EdgeReference and an EdgeIndex?
If I have an EdgeReference, how do I get an EdgeIndex?
If I have an EdgeIndex, how do I get the NodeIndexes it connects?
Anyway, I created a crude TSP solver using petgraph as a starting point for you. Please note that it is minimally tested, ni_to_n is unneeded (I left it in case it is useful to you), and many improvements are crying out to be made. But it should give you some idea of how you might take an UnGraph<String, u32> (nodes are city names and edge weights are u32 distances) and get to an approximate TSP solution. My basic strategy is to use petgraph's min_spanning_tree() and then create a cycle. See the comments below for more.
I hope this is useful; if you improve it, please post!
use petgraph::algo::min_spanning_tree;
use petgraph::data::FromElements;
use petgraph::graph::{EdgeIndex, NodeIndex, UnGraph};
use std::collections::{HashMap, HashSet, VecDeque};

// function that returns the cycle length of the passed route
fn measure_route(route: &VecDeque<usize>, ddv: &[Vec<u32>]) -> u32 {
    let mut len = 0;
    for i in 1..route.len() {
        len += ddv[route[i - 1]][route[i]];
    }
    len + ddv[route[0]][route[route.len() - 1]]
}

// Travelling salesman solver - the strategy is:
// 1) create a minimal spanning tree
// 2) reduce all nodes to two or fewer connections by deleting the most expensive connections
// 3) connect all nodes with 0 or 1 connections to each other via the least expensive connections
fn tsp(g: &UnGraph<String, u32>) -> u32 {
    // translation collections: NodeIndex <-> usize
    let n_to_ni: Vec<NodeIndex> = g.node_indices().collect();
    let mut ni_to_n: HashMap<NodeIndex, usize> = HashMap::new();
    for (n, ni) in g.node_indices().enumerate() {
        ni_to_n.insert(ni, n);
    }
    // the original distance data in a vector
    let mut ddv: Vec<Vec<u32>> = vec![vec![u32::MAX; g.node_count()]; g.node_count()];
    for x in 0..g.node_count() {
        ddv[x][x] = 0;
        for y in x + 1..g.node_count() {
            let mut edges = g.edges_connecting(n_to_ni[x], n_to_ni[y]);
            let mut shortest_edge = u32::MAX;
            while let Some(edge) = edges.next() {
                if *edge.weight() < shortest_edge {
                    shortest_edge = *edge.weight();
                }
            }
            ddv[x][y] = shortest_edge;
            ddv[y][x] = shortest_edge;
        }
    }
    // create a graph with only the needed edges to form a minimum spanning tree
    let mut mst = UnGraph::<_, _>::from_elements(min_spanning_tree(&g));
    // delete most expensive connections to reduce connections to 2 or fewer for each node
    'rem_loop: loop {
        for ni1 in mst.node_indices() {
            let mut ev: Vec<(u32, EdgeIndex)> = vec![];
            for ni2 in mst.node_indices() {
                if let Some(ei) = mst.find_edge(ni1, ni2) {
                    ev.push((*mst.edge_weight(ei).unwrap(), ei));
                }
            }
            if ev.len() > 2 {
                ev.sort();
                mst.remove_edge(ev[2].1);
                // since we modified mst, need to start over as one other EdgeIndex will be invalid
                continue 'rem_loop;
            }
        }
        break;
    }
    // build a vector of routes from the nodes
    let mut routes: Vec<VecDeque<usize>> = vec![];
    let mut no_edges: Vec<usize> = vec![];
    let mut visited: HashSet<usize> = HashSet::new();
    let mut stack: VecDeque<usize> = VecDeque::default();
    for n in 0..mst.node_count() {
        if !visited.contains(&n) {
            stack.push_back(n);
        }
        while !stack.is_empty() {
            let n2 = stack.pop_front().unwrap();
            let mut eflag = false;
            visited.insert(n2);
            for n3 in 0..mst.node_count() {
                if mst.find_edge(n_to_ni[n2], n_to_ni[n3]).is_some() {
                    eflag = true;
                    if !visited.contains(&n3) {
                        stack.push_back(n3);
                        let mut fflag = false;
                        for r in routes.iter_mut() {
                            if r[0] == n2 {
                                r.push_front(n3);
                                fflag = true;
                            } else if r[r.len() - 1] == n2 {
                                r.push_back(n3);
                                fflag = true;
                            } else if r[0] == n3 {
                                r.push_front(n2);
                                fflag = true;
                            } else if r[r.len() - 1] == n3 {
                                r.push_back(n2);
                                fflag = true;
                            }
                        }
                        if !fflag {
                            // not found, create a new VecDeque
                            let mut vd = VecDeque::default();
                            vd.push_back(n2);
                            vd.push_back(n3);
                            routes.push(vd);
                        }
                    }
                }
            }
            if !eflag {
                no_edges.push(n2);
            }
        }
    }
    // put each node with no edges on the end of a route based on cost
    for n in &no_edges {
        let mut route_num = usize::MAX;
        let mut insert_loc = 0;
        let mut shortest = u32::MAX;
        for ridx in 0..routes.len() {
            if ddv[*n][routes[ridx][0]] < shortest {
                shortest = ddv[*n][routes[ridx][0]];
                route_num = ridx;
                insert_loc = 0;
            }
            if ddv[routes[ridx][routes[ridx].len() - 1]][*n] < shortest {
                shortest = ddv[routes[ridx][routes[ridx].len() - 1]][*n];
                route_num = ridx;
                insert_loc = routes[ridx].len() - 1;
            }
        }
        if route_num == usize::MAX || shortest == u32::MAX {
            panic!("unable to deal with singleton node {}", *n);
        } else if insert_loc != 0 {
            routes[route_num].push_back(*n);
        } else {
            routes[route_num].push_front(*n);
        }
    }
    // merge routes into a single route based on cost - this could be improved by doing comparisons
    // between routes[n] and routes[m] where m != 0 and n != 0
    let mut tour = routes[0].clone();
    for ridx in 1..routes.len() {
        let mut v: Vec<(u32, bool, bool)> = vec![];
        v.push((ddv[routes[ridx][routes[ridx].len() - 1]][tour[0]], true, false));
        v.push((ddv[routes[ridx][routes[ridx].len() - 1]][tour[tour.len() - 1]], true, true));
        v.push((ddv[routes[ridx][0]][tour[0]], false, false));
        v.push((ddv[routes[ridx][0]][tour[tour.len() - 1]], false, true));
        v.sort_unstable();
        match v[0] {
            (_, true, false) => {
                // end to beginning of tour
                for (insert_loc, n) in routes[ridx].iter().enumerate() {
                    tour.insert(insert_loc, *n);
                }
            }
            (_, true, true) => {
                // end to end of tour
                let insert_loc = tour.len();
                for n in &routes[ridx] {
                    tour.insert(insert_loc, *n);
                }
            }
            (_, false, false) => {
                // beginning to beginning of tour
                for n in &routes[ridx] {
                    tour.push_front(*n);
                }
            }
            (_, false, true) => {
                // beginning to end of tour
                for n in &routes[ridx] {
                    tour.push_back(*n);
                }
            }
        }
    }
    // print out the tour and return its length
    dbg!(tour.clone());
    measure_route(&tour, &ddv)
}
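For completeness, here is a minimal usage sketch. The city names and distances are made up purely for illustration; it reuses the imports and the tsp function above.
fn main() {
    // Hypothetical example data: a tiny fully connected graph of city names.
    let mut g = UnGraph::<String, u32>::new_undirected();
    let a = g.add_node("A".to_string());
    let b = g.add_node("B".to_string());
    let c = g.add_node("C".to_string());
    let d = g.add_node("D".to_string());
    g.add_edge(a, b, 10);
    g.add_edge(a, c, 15);
    g.add_edge(a, d, 20);
    g.add_edge(b, c, 35);
    g.add_edge(b, d, 25);
    g.add_edge(c, d, 30);
    println!("approximate tour length: {}", tsp(&g));
}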

Related

Why can't this struct method add an element to a vector through a mutable reference?

I have been trying to implement SHA256 as practice, but I stumbled upon a behavior that I do not fully understand.
I start with a Vec<u8>, where I place the data to be hashed. Then, I pass a mutable reference to the hash function, where it adds the SHA2 padding. The problem is that when the push function is reached within the hash function, it does not appear to add anything.
I determined this behavior using the debugger, since the program does not crash, it just hangs in the while loop.
use std::fmt;

struct Sha256 {
    state: [u32; 8],
    k: [u32; 64],
}

impl fmt::Display for Sha256 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:x?}{:x?}{:x?}{:x?}{:x?}{:x?}{:x?}{:x?}",
            self.state[0], self.state[1], self.state[2], self.state[3],
            self.state[4], self.state[5], self.state[6], self.state[7]
        )
    }
}

impl Sha256 {
    pub fn new() -> Sha256 {
        Sha256 {
            state: [
                0x6a09e667,
                0xbb67ae85,
                0x3c6ef372,
                0xa54ff53a,
                0x510e527f,
                0x9b05688c,
                0x1f83d9ab,
                0x5be0cd19,
            ],
            k: [
                0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
                0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
                0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
                0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
                0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
                0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
                0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
                0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
            ],
        }
    }

    pub fn process_block(&mut self, data: &[u8]) {
        let mut w = [0u32; 64];
        for (i, &d) in data.iter().enumerate() {
            let byte = i % 4;
            let word = i / 4;
            w[word] |= (d as u32) << ((8 * (3 - byte)) as u32);
        }
        println!("{:?}", w);
        for i in 16..64 {
            let s0 = w[i - 15].rotate_right(7) ^ w[i - 15].rotate_right(18) ^ w[i - 15].rotate_right(3);
            let s1 = w[i - 2].rotate_right(17) ^ w[i - 2].rotate_right(19) ^ w[i - 2].rotate_right(10);
            w[i] = w[i - 16].wrapping_add(s0).wrapping_add(w[i - 7]).wrapping_add(s1);
        }
        let mut a = self.state[0];
        let mut b = self.state[1];
        let mut c = self.state[2];
        let mut d = self.state[3];
        let mut e = self.state[4];
        let mut f = self.state[5];
        let mut g = self.state[6];
        let mut h = self.state[7];
        for i in 0..64 {
            let s1 = e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25);
            let ch = (e & f) ^ ((!e) & g);
            let t1 = h.wrapping_add(s1).wrapping_add(ch).wrapping_add(self.k[i]).wrapping_add(w[i]);
            let s0 = a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22);
            let maj = (a & b) ^ (a & c) ^ (b & c);
            let t2 = s0.wrapping_add(maj);
            h = g;
            g = f;
            f = e;
            e = d.wrapping_add(t1);
            d = c;
            c = b;
            b = a;
            a = t1.wrapping_add(t2);
        }
        self.state[0] = self.state[0].wrapping_add(a);
        self.state[1] = self.state[1].wrapping_add(b);
        self.state[2] = self.state[2].wrapping_add(c);
        self.state[3] = self.state[3].wrapping_add(d);
        self.state[4] = self.state[4].wrapping_add(e);
        self.state[5] = self.state[5].wrapping_add(f);
        self.state[6] = self.state[6].wrapping_add(g);
        self.state[7] = self.state[7].wrapping_add(h);
    }

    pub fn hash(&mut self, v: &mut Vec<u8>) {
        v.push(0x80);
        while (v.len() % 64) < 56 {
            v.push(0x00);
        }
        let size = v.len() as u64;
        let mut s_idx = 0;
        while s_idx < 8 {
            let byte = ((size >> (8 * (7 - s_idx))) & 0xffu64) as u8;
            s_idx += 1;
            v.push(byte);
        }
        println!("{:?}", v);
        for i in 0..(v.len() / 64) {
            self.process_block(&v[i * 64..(i + 1) * 64]);
        }
    }
}

fn main() {
    let mut th = Sha256::new();
    let mut v = Vec::<u8>::new();
    // Sha256::hash(&mut th, &mut v); // This does not work
    th.hash(&mut v); // Neither does this
    println!("{}", th);
}
If I create another function I am able to push data within the function, like this:
fn add_elem(v: &mut Vec<u8>) {
    v.push(10);
}

fn main() {
    let mut th = Sha256::new();
    let mut v = Vec::<u8>::new();
    add_elem(&mut v);
    th.hash(&mut v);
    println!("{}", th);
}
I don't know what I am missing here, because the reference is the same, but it works sometimes and other times it doesn't.
I am using the Rust 1.59 stable version for Linux and Windows (tested on both systems).
It seems to be a debugger issue in this function: the vector does in fact grow, but that cannot be seen by calling p v in the GDB console.
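To confirm this without the debugger, here is a small self-contained sketch. The pad function is just a hypothetical extraction of the padding loop from the question; it asserts that the Vec really does grow through the &mut reference and ends up block-aligned. (Real SHA-256 stores the message length in bits, but that detail doesn't matter for checking growth.)
// Mirrors the question's padding loop so the growth can be asserted directly.
fn pad(v: &mut Vec<u8>) {
    v.push(0x80);
    while (v.len() % 64) < 56 {
        v.push(0x00);
    }
    let size = v.len() as u64;
    for s_idx in 0..8 {
        v.push(((size >> (8 * (7 - s_idx))) & 0xff) as u8);
    }
}

fn main() {
    let mut v = vec![1u8, 2, 3];
    let before = v.len();
    pad(&mut v);
    assert!(v.len() > before);          // the pushes through &mut did happen
    assert_eq!(v.len() % 64, 0);        // and the buffer is block-aligned
    println!("buffer grew from {} to {} bytes", before, v.len());
}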

Constructing a Sparse Tropical Limit Function in Chapel

Given matrices A and B the tropical product is defined to be the usual matrix product with multiplication traded out for addition and addition traded out for minimum. That is, it returns a new matrix C such that,
C_ij = minimum(A_ij, B_ij, A_i1 + B_1j, A_i2 + B_2j, ..., A_im + B_mj)
Given the underlying adjacency matrix A_g of a graph g, the nth "power" with respect to the tropical product represents the connections between nodes reachable in at most n steps. That is, C_ij = (A**n)_ij has value m if nodes i and j are separated by m<=n edges.
In general, given some graph with N nodes, the diameter of the graph can be at most N; and, given a graph with diameter k, A**n = A**k for all n > k, and the matrix D_ij = (A**k)_ij is called the "distance matrix", its entries representing the distances between all nodes in the graph.
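Just to pin down that definition in code: the sketch below is a hypothetical dense version in Rust rather than Chapel (square matrices, with infinity standing in for "no edge"); the Chapel-specific sparse issues are what the rest of this question is about.
// Dense tropical product following the definition above: addition plays the
// role of multiplication and minimum plays the role of addition. The existing
// entries A_ij and B_ij are included in the minimum, as in the formula above.
fn tropic(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
    let n = a.len();
    let mut c = vec![vec![f64::INFINITY; n]; n];
    for i in 0..n {
        for j in 0..n {
            let mut best = a[i][j].min(b[i][j]);
            for k in 0..n {
                best = best.min(a[i][k] + b[k][j]);
            }
            c[i][j] = best;
        }
    }
    c
}

fn main() {
    // A path graph 0 - 1 - 2 with edge weight 1.0 and INFINITY for "no edge".
    let inf = f64::INFINITY;
    let a = vec![
        vec![0.0, 1.0, inf],
        vec![1.0, 0.0, 1.0],
        vec![inf, 1.0, 0.0],
    ];
    let c = tropic(&a, &a);
    println!("{:?}", c); // c[0][2] is now 2.0: node 2 is reachable from 0 in two steps
}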
I have written a tropical product function in Chapel, and I want to write a function that takes an adjacency matrix and returns the resulting distance matrix. I have tried the following approaches to no avail. Guidance in getting past these errors would be greatly appreciated!
proc tropicLimit(A:[] real, B:[] real) {
  var R = tropic(A,B);
  if A == R {
    return A;
  } else {
    tropicLimit(R,B);
  }
}
which threw a domain mismatch error so I made the following edit:
proc tropicLimit(A:[] real, B:[] real) {
  var R = tropic(A,B);
  if A.domain == R.domain {
    if && reduce (A == R) {
      return R;
    } else {
      tropicLimit(R,B);
    }
  } else {
    tropicLimit(R,B);
  }
}
which throws
src/MatrixOps.chpl:602: error: control reaches end of function that returns a value
proc tropicLimit(A:[] real, B:[] real) {
  var R = tropic(A,B);
  if A.domain == R.domain {
    if && reduce (A == R) { // Line 605 is this one
    } else {
      tropicLimit(R,B);
    }
  } else {
    tropicLimit(R,B);
  }
  return R;
}
which brings me back to this error:
src/MatrixOps.chpl:605: error: halt reached - Sparse arrays can't be zippered with anything other than their domains and sibling arrays (CS layout)
I also tried using a for loop with a break condition but that didn't work either
proc tropicLimit(B:[] real) {
  var R = tropic(B,B);
  for n in B.domain.dim(2) {
    var S = tropic(R,B);
    if S.domain != R.domain {
      R = S; // Intended to just reassign the handle "R" to the contents of "S" i.o.w. destructive update of R
    } else {
      break;
    }
  }
  return R;
}
Any suggestions?
src/MatrixOps.chpl:605: error: halt reached - Sparse arrays can't be zippered with anything other than their domains and sibling arrays (CS layout)
I believe you are encountering a limitation of zippering sparse arrays in the current implementation, documented in #6577.
Removing some unknowns from the equation, I believe this distilled code snippet demonstrates the issue you are encountering:
use LayoutCS;
var dom = {1..10, 1..10};
var Adom: sparse subdomain(dom) dmapped CS();
var Bdom: sparse subdomain(dom) dmapped CS();
var A: [Adom] real;
var B: [Bdom] real;
Adom += (1,1);
Bdom += (1,1);
A[1,1] = 1.0;
B[1,1] = 2.0;
writeln(A.domain == B.domain); // true
var willThisWork = && reduce (A == B);
// dang.chpl:19: error: halt reached - Sparse arrays can't be zippered with
// anything other than their domains and sibling arrays (CS layout)
As a work-around, I would suggest looping over the sparse indices after confirming the domains are equal and performing a && reduce. This is something you could wrap in a helper function, e.g.
proc main() {
  var dom = {1..10, 1..10};
  var Adom: sparse subdomain(dom) dmapped CS();
  var Bdom: sparse subdomain(dom) dmapped CS();
  var A: [Adom] real;
  var B: [Bdom] real;
  Adom += (1,1);
  Bdom += (1,1);
  A[1,1] = 1.0;
  B[1,1] = 2.0;
  if A.domain == B.domain {
    writeln(equal(A, B));
  }
}

/* Some day, this should be A.equals(B) ! */
proc equal(A: [], B: []) {
  // You could also return 'false' if domains do not match
  assert(A.domain == B.domain);
  var s = true;
  forall (i,j) in A.domain with (&& reduce s) {
    s &&= (A[i,j] == B[i,j]);
  }
  return s;
}
src/MatrixOps.chpl:602: error: control reaches end of function that returns a value
This error is a result of not returning something in every condition. I believe you intended to do:
proc tropicLimit(A:[] real, B:[] real) {
  var R = tropic(A,B);
  if A.domain == R.domain {
    if && reduce (A == R) {
      return R;
    } else {
      return tropicLimit(R,B);
    }
  } else {
    return tropicLimit(R,B);
  }
}

Is there a way to optimize this code so it doesn't overflow the stack?

I am working on the third Project Euler problem:
fn main() {
    println!("{}", p3());
}

fn p3() -> u64 {
    let divs = divisors(1, 600851475143, vec![]);
    let mut max = 0;
    for x in divs {
        if prime(x, 0, false) && x > max {
            max = x
        }
    }
    max
}

fn divisors(i: u64, n: u64, div: Vec<u64>) -> Vec<u64> {
    let mut temp = div;
    if i * i > n {
        temp
    } else {
        if n % i == 0 {
            temp.push(i);
            temp.push(n / i);
        }
        divisors(i + 2, n, temp)
    }
}

fn prime(n: u64, i: u64, skip: bool) -> bool {
    if !skip {
        if n == 2 || n == 3 {
            true
        } else if n % 3 == 0 || n % 2 == 0 {
            false
        } else {
            prime(n, 5, true)
        }
    } else {
        if i * i > n {
            true
        } else if n % i == 0 || n % (i + 2) == 0 {
            false
        } else {
            prime(n, i + 6, true)
        }
    }
}
The value 600851475143 is the one that at some point causes the overflow. If I replace it with any value on the order of 10^10 or less, it returns an answer. While keeping it as a recursive solution, is there any way to either:
Increase the stack size?
Optimize my code so it doesn't return a fatal runtime: stack overflow error?
I know this can be done iteratively, but I'd prefer to not do that.
A vector containing 600 * 10^9 u64s means you'll need 4.8 terabytes of RAM or swap space.
I'm sure you don't need that for this problem; you're missing some math knowledge here: scanning up to the square root of 600851475143 will be sufficient. You may also speed up the program by using the Sieve of Eratosthenes.
Project Euler is nice to sharpen your math skills, but it doesn't help you with any programming language in particular. For learning Rust I started with Exercism.
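For reference, a minimal Sieve of Eratosthenes sketch (illustrative only; the sieve function and limit are made up for the example, and for this problem you would only need to sieve up to about 775,146, the integer square root of 600851475143):
// Returns primality flags for 0..=limit using the classic sieve.
fn sieve(limit: usize) -> Vec<bool> {
    let mut is_prime = vec![true; limit + 1];
    is_prime[0] = false;
    if limit >= 1 {
        is_prime[1] = false;
    }
    let mut p = 2;
    while p * p <= limit {
        if is_prime[p] {
            // Mark every multiple of p starting from p*p as composite.
            let mut multiple = p * p;
            while multiple <= limit {
                is_prime[multiple] = false;
                multiple += p;
            }
        }
        p += 1;
    }
    is_prime
}

fn main() {
    let flags = sieve(30);
    let primes: Vec<usize> = (2..=30).filter(|&n| flags[n]).collect();
    println!("{:?}", primes); // [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
}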
Performing some optimizations, such as going just up to the square root of the number when checking for its factors and for whether it's a prime, I've got:
fn is_prime(n: i64) -> bool {
    let float_input = n as f64;
    let upper_bound = float_input.sqrt() as i64;
    for x in 2..upper_bound + 1 {
        if n % x == 0 {
            return false;
        }
    }
    return true;
}

fn get_factors(n: i64) -> Vec<i64> {
    let mut factors: Vec<i64> = Vec::new();
    let float_input = n as f64;
    let upper_bound = float_input.sqrt() as i64;
    for x in 1..upper_bound + 1 {
        if n % x == 0 {
            factors.push(x);
            factors.push(n / x);
        }
    }
    factors
}

fn get_prime_factors(n: i64) -> Vec<i64> {
    get_factors(n)
        .into_iter()
        .filter(|&x| is_prime(x))
        .collect::<Vec<i64>>()
}

fn main() {
    if let Some(max) = get_prime_factors(600851475143).iter().max() {
        println!("{:?}", max);
    }
}
On my machine, this code runs very fast with no overflow.
./problem003 0.03s user 0.00s system 90% cpu 0.037 total
If you really don't want the iterative version:
First, make sure that you compile with optimizations (rustc -O or cargo --release). Without them there's no chance for TCO in Rust. Your divisors function is tail-recursive, but it seems that moving this Vec up and down the recursion stack is confusing enough for LLVM to miss that fact. We can help the compiler a little by using just a reference here:
fn divisors(i: u64, n: u64, mut div: Vec<u64>) -> Vec<u64> {
    divisors_(i, n, &mut div);
    div
}

fn divisors_(i: u64, n: u64, div: &mut Vec<u64>) {
    if i * i > n {
    } else {
        if n % i == 0 {
            div.push(i);
            div.push(n / i);
        }
        divisors_(i + 2, n, div)
    }
}
On my machine, those changes make the code no longer segfault.
If you want to increase the stack size anyway, you should run your function in a separate thread with increased stack size (using std::thread::Builder::stack_size)
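A rough sketch of that approach is below; the 64 MiB figure is an arbitrary placeholder, and the deep helper is just a hypothetical stand-in for the recursive solver (in your case you would spawn p3 instead):
use std::thread;

// Stand-in for a deeply recursive function; replace with the question's p3().
fn deep(n: u64) -> u64 {
    if n == 0 { 0 } else { 1 + deep(n - 1) }
}

fn main() {
    let worker = thread::Builder::new()
        .stack_size(64 * 1024 * 1024) // 64 MiB instead of the platform default
        .spawn(|| deep(200_000))
        .expect("failed to spawn worker thread");
    println!("{}", worker.join().expect("worker thread panicked"));
}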
Rust has reserved the become keyword for guaranteed tail recursion,
so maybe in the future you'll just need to add one keyword to your code to make it work.

Most efficient way to fill a vector from back to front

I am trying to populate a vector with a sequence of values. In order to calculate the first value I need to calculate the second value, which depends on the third value etc etc.
let mut bxs = Vec::with_capacity(n);
for x in info {
    let b = match bxs.last() {
        Some(bx) => union(&bx, &x.bbox),
        None => x.bbox.clone(),
    };
    bxs.push(b);
}
bxs.reverse();
bxs.reverse();
Currently I just fill the vector front to back using v.push(x) and then reverse the vector using v.reverse(). Is there a way to do this in a single pass?
Is there a way to do this in a single pass?
If you don't mind adapting the vector, it's relatively easy.
use std::ops::{Index, IndexMut};

struct RevVec<T> {
    data: Vec<T>,
}

impl<T> RevVec<T> {
    // push_front appends to the back of the storage; the Index impls below
    // reverse the view, so the newest element is always logical index 0.
    fn push_front(&mut self, t: T) { self.data.push(t); }
    fn len(&self) -> usize { self.data.len() }
}

impl<T> Index<usize> for RevVec<T> {
    type Output = T;
    fn index(&self, index: usize) -> &T {
        &self.data[self.len() - index - 1]
    }
}

impl<T> IndexMut<usize> for RevVec<T> {
    fn index_mut(&mut self, index: usize) -> &mut T {
        let len = self.len();
        &mut self.data[len - index - 1]
    }
}
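A quick usage sketch of the wrapper above: indices read back in reverse of insertion order, while the underlying Vec only ever grows at the back.
fn main() {
    let mut rv = RevVec { data: Vec::new() };
    rv.push_front(3); // underlying data: [3]
    rv.push_front(2); // underlying data: [3, 2]
    rv.push_front(1); // underlying data: [3, 2, 1]
    // The Index impl maps logical index 0 to the back of `data`:
    assert_eq!(rv[0], 1);
    assert_eq!(rv[1], 2);
    assert_eq!(rv[2], 3);
}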
The solution using unsafe is below. The unsafe version is slightly more than 2x as fast as the safe version using reverse(). The idea is to use Vec::with_capacity(usize) to allocate the vector, then use ptr::write(dst: *mut T, src: T) to write the elements into the vector back to front. offset(self, count: isize) -> *const T is used to calculate the offset into the vector.
extern crate time;

use std::fmt::Debug;
use std::ptr;
use time::PreciseTime;

fn scanl<T, F>(u: &Vec<T>, f: F) -> Vec<T>
    where T: Clone,
          F: Fn(&T, &T) -> T {
    let mut v = Vec::with_capacity(u.len());
    for x in u.iter().rev() {
        let b = match v.last() {
            None => (*x).clone(),
            Some(y) => f(x, &y),
        };
        v.push(b);
    }
    v.reverse();
    return v;
}
fn unsafe_scanl<T, F>(u: &Vec<T>, f: F) -> Vec<T>
    where T: Clone + Debug,
          F: Fn(&T, &T) -> T {
    unsafe {
        let mut v: Vec<T> = Vec::with_capacity(u.len());
        let p = v.as_mut_ptr();
        match u.last() {
            None => return v,
            Some(x) => ptr::write(p.offset((u.len() - 1) as isize), x.clone()),
        };
        for i in (0..u.len() - 1).rev() {
            ptr::write(p.offset(i as isize), f(v.get_unchecked(i + 1), u.get_unchecked(i)));
        }
        // set the length to the number of elements actually written, not to
        // the capacity (with_capacity may allocate more than requested)
        v.set_len(u.len());
        return v;
    }
}
pub fn bench_scanl() {
    let lo: u64 = 0;
    let hi: u64 = 1000000;
    let v: Vec<u64> = (lo..hi).collect();

    let start = PreciseTime::now();
    let u = scanl(&v, |x, y| x + y);
    let end = PreciseTime::now();
    println!("{:?}\n in {}", u.len(), start.to(end));

    let start2 = PreciseTime::now();
    let u = unsafe_scanl(&v, |x, y| x + y);
    let end2 = PreciseTime::now();
    println!("2){:?}\n in {}", u.len(), start2.to(end2));
}

How to eliminate this type of recursion?

This is a bit more intricate than simple left recursion or tail-call recursion, so I'm wondering how I can eliminate this kind of recursion. I'm already keeping my own stack, as you can see below, so the function needs no params or return values. However, it still calls itself up (or down) to a certain level, and I want to turn this into a loop, but I've been scratching my head over this for some time now.
Here's the simplified test case, replacing all "real logic" with printf("dostuff at level #n") messages. This is in Go, but the problem is applicable to most languages. Use of loops and gotos would be perfectly acceptable (I played with this, but it gets convoluted, out of hand, and seemingly unworkable); however, additional helper functions should be avoided. I guess I should turn this into some kind of simple state machine, but... which? ;)
As for practicality, this is to run about 20 million times per second (stack depth can range from 1 through 25 max later on). This is a case where maintaining my own stack is bound to be more stable / faster than the function call stack. (There are no other function calls in this function, only calculations.) Also, no garbage generated = no garbage collected.
So here goes:
func testRecursion() {
    var root *TMyTreeNode = makeSomeDeepTreeStructure()
    // rl: current recursion level
    // ml: max recursion level
    var rl, ml = 0, root.MaxDepth
    // node: "the stack"
    var node = make([]*TMyTreeNode, ml+1)
    // the recursive and the non-recursive / iterative test functions:
    var walkNodeRec, walkNodeIt func()
    walkNodeIt = func() {
        log.Panicf("YOUR ITERATIVE / NON-RECURSIVE IDEAS HERE")
    }
    walkNodeRec = func() {
        log.Printf("ENTER LEVEL %v", rl)
        if (node[rl].Level == ml) || (node[rl].ChildNodes == nil) {
            log.Printf("EXIT LEVEL %v", rl)
            return
        }
        log.Printf("PRE-STUFF LEVEL %v", rl)
        for i := 0; i < 3; i++ {
            switch i {
            case 0:
                log.Printf("PRECASE %v.%v", rl, i)
                node[rl+1] = node[rl].ChildNodes[rl+i]; rl++; walkNodeRec(); rl--
                log.Printf("POSTCASE %v.%v", rl, i)
            case 1:
                log.Printf("PRECASE %v.%v", rl, i)
                node[rl+1] = node[rl].ChildNodes[rl+i]; rl++; walkNodeRec(); rl--
                log.Printf("POSTCASE %v.%v", rl, i)
            case 2:
                log.Printf("PRECASE %v.%v", rl, i)
                node[rl+1] = node[rl].ChildNodes[rl+i]; rl++; walkNodeRec(); rl--
                log.Printf("POSTCASE %v.%v", rl, i)
            }
        }
    }
    // test recursion for reference:
    if true {
        rl, node[0] = 0, root
        log.Printf("\n\n=========>RECURSIVE ML=%v:", ml)
        walkNodeRec()
    }
    // test non-recursion, output should be identical
    if true {
        rl, node[0] = 0, root
        log.Printf("\n\n=========>ITERATIVE ML=%v:", ml)
        walkNodeIt()
    }
}
UPDATE -- after some discussion here, and further thinking:
I just made up the following pseudo-code which in theory should do what I need:
curLevel = 0
for {
    cn = nextsibling(curLevel, coords)
    lastnode[curLevel] = cn
    if cn < 8 {
        if isleaf {
            process()
        } else {
            curLevel++
        }
    } else if curLevel == 0 {
        break
    } else {
        curLevel--
    }
}
Of course the tricky part will be filling out nextsibling() for my custom use-case. But just as a general solution to eliminating the inner recursion while maintaining the depth-first traversal order I need, this rough outline should work in some form or another.
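Purely as a general illustration of the pattern that outline describes (an explicit stack that carries a "next child" cursor, so both pre-visit and post-visit work stay in depth-first order), here is a small sketch. It is in Rust rather than Go, and Node is just a hypothetical stand-in for TMyTreeNode.
// Iterative pre/post-order depth-first walk with an explicit stack.
struct Node {
    level: usize,
    children: Vec<Node>,
}

fn walk_iterative(root: &Node) {
    // Each stack entry is (node, index of the next child to visit).
    let mut stack: Vec<(&Node, usize)> = vec![(root, 0)];
    while let Some((node, next_child)) = stack.pop() {
        if next_child == 0 {
            println!("ENTER LEVEL {}", node.level); // "pre" work on first visit
        }
        if next_child < node.children.len() {
            // Re-push the parent with the child cursor advanced, then descend.
            stack.push((node, next_child + 1));
            stack.push((&node.children[next_child], 0));
        } else {
            println!("EXIT LEVEL {}", node.level); // "post" work after all children
        }
    }
}

fn main() {
    let root = Node {
        level: 0,
        children: vec![
            Node { level: 1, children: vec![] },
            Node { level: 1, children: vec![Node { level: 2, children: vec![] }] },
        ],
    };
    walk_iterative(&root);
}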
I'm not really sure I understand what it is you want to do, since your recursion code looks a little strange. However, if I understand the structure of your TMyTreeNode, then this is what I would do for a non-recursive version.
// root is our root node
q := []*TMyTreeNode{root}
processed := make(map[*TMyTreeNode]bool)
for {
    l := len(q)
    if l < 1 {
        break // our queue is empty
    }
    curr := q[l-1]
    if !processed[curr] && len(curr.childNodes) > 0 {
        // do something with curr
        processed[curr] = true
        q = append(q, curr.childNodes...)
        continue // continue on down the tree.
    } else {
        // do something with curr
        processed[curr] = true
        q = q[:l-1] // pop current off the queue
    }
}
NOTE: This will go arbitrarily deep into the structure. If that's not what you want it will need some modifications.
