While preparing for an exam I came across a question about hash tables.
I am given a table of length 11 with the following hash function:
h(k,i) = ( k mod 13 + i * (1 + k mod 7) ) mod 11
The hash table is then resized to size 12. So the new hash function becomes:
h'(k,i) = ( k mod 13 + i * (1 + k mod 7) ) mod 12
What problems occur?
The problem is that the hash function becomes worse.
In the first case, the distribution of the different combinations of k and i is very even among the 11 hash bins. In the second case, the distribution is not so even: in particular, the number of combinations of k and i for which the result of the hash function is 0 is noticeably higher.
Of course, during an exam, one would have to argue why this happens. It is related to the following observations:
k mod 13 is a value between 0 and 12
k mod 7 is a value between 0 and 6, so the probe step 1 + k mod 7 is a value between 1 and 7
11 is a prime number, while 12 has many divisors
A more convincing argument builds on these facts. For a fixed key k, the probe sequence visits the slots (a + i*s) mod m, with offset a = k mod 13 and step s = 1 + k mod 7. Such a sequence can reach only m / gcd(s, m) distinct slots. For m = 11, every step s in 1..7 is coprime to 11, so each key's probe sequence covers all 11 slots. For m = 12, the steps 2, 3, 4 and 6 share a factor with 12 and reach only 6, 4, 3 and 2 distinct slots respectively, so many probe sequences stay trapped in a small subset of the table. In addition, the offset k mod 13 reduced mod 12 is 0 for two residues of k (k ≡ 0 and k ≡ 12 mod 13), twice as many as for any other slot, which helps explain why slot 0 in particular is hit noticeably more often.
import java.util.LinkedHashMap;
import java.util.Map;

public class HashTest
{
    public static void main(String[] args)
    {
        int maxK = 30;
        int maxI = 30;
        System.out.println(computeFrequencies(h0, maxK, maxI));
        System.out.println(computeFrequencies(h1, maxK, maxI));
    }

    private static Map<Integer, Integer> computeFrequencies(
        Hash hash, int maxK, int maxI)
    {
        Map<Integer, Integer> frequencies =
            new LinkedHashMap<Integer, Integer>();
        for (int k=0; k<maxK; k++)
        {
            for (int i=0; i<maxI; i++)
            {
                int value = hash.compute(k, i);
                Integer count = frequencies.get(value);
                if (count == null)
                {
                    count = 0;
                }
                frequencies.put(value, count+1);
            }
        }
        return frequencies;
    }

    private static interface Hash
    {
        int compute(int k, int i);
    }

    private static final Hash h0 = new Hash()
    {
        @Override
        public int compute(int k, int i)
        {
            return ((k % 13) + i * (1 + (k % 7))) % 11;
        }
    };

    private static final Hash h1 = new Hash()
    {
        @Override
        public int compute(int k, int i)
        {
            return ((k % 13) + i * (1 + (k % 7))) % 12;
        }
    };
}
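To make the coset argument above concrete, here is a small sketch (my own addition, not part of the original question) that prints how many distinct slots a probe sequence with step s can reach for both table sizes:

public class ProbeCoverage
{
    public static void main(String[] args)
    {
        for (int m : new int[] { 11, 12 })
        {
            System.out.println("Table size " + m + ":");
            for (int s = 1; s <= 7; s++)
            {
                // A probe sequence (a + i*s) mod m reaches m / gcd(s, m) distinct slots
                System.out.println("  step " + s + " reaches " + (m / gcd(s, m)) + " slots");
            }
        }
    }

    private static int gcd(int a, int b)
    {
        return b == 0 ? a : gcd(b, a % b);
    }
}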
Related
I am trying to map a zero-indexed value to a multidimensional ArrayList using the map function, so that I can assign data to specific values within the ArrayList without having to constantly request the index location. The code I have written is not compiling for some reason.
Could someone please check it on their side, and criticise where required?
`
package main.java.javatestfiles;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

/**
 * #####CORRECTED#####
 *
 * The purpose of the class is to map a cubic calculation to
 * a multidimensional arraylist as the partials iterate over
 * the cubic sum of n in the key to value mappings.
 *
 * Help from : https://www.javatpoint.com/java-map
 *
 * @author devel
 */
public class Cubic_Mapping_2 {

    public static void main(String[] args) {
        int j, k, l, i;
        double n = 4.0;
        double v = Math.pow(n, 3.0);
        ArrayList<ArrayList<ArrayList<Integer>>> c = new ArrayList<>();
        Map map = new HashMap();
        for (j = 0; j <= v-1; j++) {
            for (k = 0; k <= n; k++) {
                for(l = 0; l <= n; l++) {
                    for (i = 0; i <= n; i++) {
                        map.put(c.get(k).get(l).get(i), j);
                    }
                }
            }
        }
        Set set = map.entrySet(); //Converting to Set so that we can traverse
        Iterator itr = set.iterator();
        while(itr.hasNext()){
            //Converting to Map.Entry so that we can get key and value
            Map.Entry entry = (Map.Entry) itr.next();
            System.out.println(entry.getKey() + " " + entry.getValue());
        }
    }
}
`
Thank you, look forward to hearing from you.
R
I tried swapping the key and value variables around (<int, arraylist>), and tried printing the c variable to check its contents, but that will not work because there is no content; it is a mapping schema.
There does not seem to be any error message on compile in VS Code, however in Eclipse I just get a generic exception for index 0.
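For what it is worth, that index-0 exception is consistent with the code above: c is created as an empty ArrayList and never populated, so the very first c.get(k) has nothing to return. Below is a minimal, hypothetical sketch (the names are mine, not from the original code) of pre-filling a nested ArrayList so that get() calls on it are legal:

import java.util.ArrayList;

public class PrefillDemo {
    public static void main(String[] args) {
        int n = 4;
        // Build an n x n x n structure up front; get(i) is only
        // legal for indices that have actually been added.
        ArrayList<ArrayList<ArrayList<Integer>>> c = new ArrayList<>();
        for (int x = 0; x < n; x++) {
            ArrayList<ArrayList<Integer>> plane = new ArrayList<>();
            for (int y = 0; y < n; y++) {
                ArrayList<Integer> row = new ArrayList<>();
                for (int z = 0; z < n; z++) {
                    row.add(0); // placeholder value
                }
                plane.add(row);
            }
            c.add(plane);
        }
        System.out.println(c.get(3).get(3).get(3)); // now safe: prints 0
    }
}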
################################################################
I wrote a new solution -
`
package javatestfiles;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

/**
 * The purpose of the class is to map a cubic calculation to
 * a multidimensional ArrayList as the partials iterate over
 * the cubic sum of n in the key to value mappings.
 *
 * Help from : https://www.javatpoint.com/java-map
 * https://www.baeldung.com/java-multi-dimensional-arraylist
 * https://www.w3schools.com/java/java_hashmap.asp
 * https://www.geeksforgeeks.org/java-util-hashmap-in-java-with-examples/
 *
 * @author devel
 */
public class Cubic_Mapping_3 {

    static Map<Integer, Object> map = new HashMap<>();
    static ArrayList<ArrayList<ArrayList<Object>>> c;

    /**
     * Populate an ArrayList with n indices cubed.
     *
     * @param n
     * @return Three dimensional ArrayList c
     */
    public static ArrayList<ArrayList<ArrayList<Object>>> three_d_list(int n) {
        int i, j;
        int x_axis = n;
        int y_axis = n;
        int z_axis = n;
        ArrayList<ArrayList<ArrayList<Object>>> c = new ArrayList<>(x_axis);
        for (i = 0; i <= x_axis; i++) {
            c.add(new ArrayList<ArrayList<Object>>(z_axis));
            for (j = 0; j < z_axis; j++) {
                c.get(i).add(new ArrayList<Object>(y_axis));
            }
        }
        return c;
    }

    /**
     * Randomly populate the mapped volumetrics with Neurons
     * @param neuron
     * @param qty - Quantity of neurons
     * @param v - Received autonomously from mapping()
     */
    public static void populate(Object neuron, int qty, double v) {
        Random rand = new Random();
        int i;
        for (i = 0; i <= qty; i++) {
            map.put(rand.nextInt((int) v), neuron);
        }
    }

    /**
     * Maps the cubic index length to the ArrayList c and calls the populate()
     * autonomously to populate the list and the mapping with the Neurons
     *
     * @param neuron - An Objectified ArrayList comprising the AI and areas / proximities
     * @param qty - The quantity of Neurons to deploy randomly in your mapped volumetrics
     * @param n - The index to be used to calculate the cubic size of the volume
     */
    public static void mapping(ArrayList<Object> neuron, int qty, int n) {
        int j, k, l, i;
        double v = Math.pow(n, 3);
        ArrayList<ArrayList<ArrayList<Object>>> c = three_d_list(n);
        for (j = 0; j < n; j++) {
            for (k = 0; k < n; k++) {
                for(l = 0; l < n; l++) {
                    c.get(j).get(k).add(l);
                    for (i = 0; i < v; i++) {
                        map.put(i, c.get(j).get(k).get(l));
                    }
                }
            }
        }
        populate(neuron, qty, v);
    }

    /**
     * Clear the data in memory store after use.
     */
    public static void clearall() {
        map.clear();
        c.clear();
    }

    public static Map<Integer, Object> main(String[] args) {
        mapping(null, 0, 0); //Entry point, the autonomy does the rest.
        return map;
    }
}
`
Here is a heapsort program I've created in Java, but I'm having an issue where it won't run.
I'm not getting any errors during compilation, which made the problem hard to identify. However, if I comment out the size decrement in my extract-maximum function, the program will run, so I assume that's where the error is. Unfortunately, that line is crucial to the program functioning properly.
If there's anything simple causing this problem, or if major adjustments need to be made to the program, I'd like to know either way.
All input is welcome.
Update
Added the main function.
The code can now be copied and pasted to run.
public class Heap
{
    private int [] data;
    private int [] fin;
    private int size;
    private int tmp = 0;

    /**
     * Constructor for objects of class Heap
     */
    public Heap(int[] A)
    {
        data = A;
        size = data.length;
        fin = new int [size];
        this.buildHeap(0);
        for(int n = size - 1; n >= 0; n--)
        {
            fin[n] = this.extractMax();
        }
    }

    public int getSize()
    {
        return size;
    }

    private void setSize(int i)
    {
        size = i;
    }

    public void print()
    {
        for(int i = 0; i < this.getSize(); i++)
            System.out.printf("%d\n", fin[i]);
    }

    /**
     * build heap using top down method
     *
     * @param i the index of the node being built upon
     */
    private void buildHeap(int i)
    {
        if(i <= (size - 2)/2)
        {
            buildHeap(2*i + 1);
            buildHeap(2*i + 2);
            heapify(i);
        }
    }

    /**
     * Extract maximum number
     *
     * @return maximum number of heap
     */
    private int extractMax()
    {
        int n = size;
        int store = 0;
        store = data[0];
        data[0] = data[n - 1];
        size--;
        this.heapify(0);
        return store;
    }

    /**
     * Heapify array
     *
     * @param i the index to heapify upon
     */
    private void heapify(int i)
    {
        if(2*i + 1 < size && data[2*i + 1] > data[i])
        {
            if(2*i + 2 < size && data[2*i + 2] > data[2*i + 1])
            {
                this.exchange(i, 2*i + 2);
                heapify(2*i + 2);
            }
            else
            {
                this.exchange(i, 2*i + 1);
                heapify(2*i + 1);
            }
        }
        if(2*i + 2 < size && data[2*i + 2] > data[i])
        {
            this.exchange(i, 2*i + 2);
            heapify(2*i + 2);
        }
    }

    private boolean exchange(int i, int k)
    {
        tmp = data[i];
        data[i] = data[k];
        data[k] = tmp;
        return true;
    }

    public static void main(String [] args)
    {
        int [] arr = {5,13,2,25,7,17,20,8,4};
        Heap heapsort = new Heap(arr);
        heapsort.print();
    }
}
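A plausible reading of the symptom, based on the code above: the constructor extracts every element, and each extractMax() decrements size, so size is 0 by the time print() runs; the loop condition i < getSize() is then false immediately and nothing is printed. The program does run, it just produces no output. A minimal sketch of one possible fix, iterating over the output array instead of the (now shrunken) heap size:

// Sketch: print the sorted output array, whose length is fixed,
// instead of the heap's logical size, which is 0 after extraction.
public void print()
{
    for(int i = 0; i < fin.length; i++)
        System.out.printf("%d\n", fin[i]);
}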
Ok, so I found this article and I am confused by some parts of it. If anyone can explain this process in more depth to me, I would greatly appreciate it, because I have been trying to code this for 2 months now and still have not gotten a correct version working.
I am specifically confused by the Persistence part of the article, because I mostly do not understand what the author is trying to explain about it. At the bottom of the article he talks about a 2D pseudocode implementation of this, but the PerlinNoise_2D function does not make sense to me: after the random value is smoothed and interpolated, it is an integer value, yet the function takes float values?
Underneath the persistence portion there is the octaves part. I do not quite understand it, because he "adds" the smoothed functions together to get the Perlin function. What does he mean by "adds", because you obviously do not just add the values together? So if anyone can explain these parts to me I would be very happy. Thanks.
Here is my code:
import java.awt.Color;
import java.awt.Graphics;
import java.util.Random;
import javax.swing.JFrame;
import javax.swing.JPanel;

@SuppressWarnings("serial")
public class TerrainGen extends JPanel {

    public static int layers = 3;
    public static float[][][][] noise = new float[16][16][81][layers];
    public static int[][][][] octaves = new int[16][16][81][layers];
    public static int[][][][] perlin = new int[16][16][81][layers];
    public static int[][][] perlinnoise = new int[16][16][81];
    public static int SmoothAmount = 3;
    public static int interpolate1 = 0;
    public static int interpolate2 = 10;
    public static double persistence = 0.25;

    //generate noise
    //smooth noise
    //interpolate noise
    //perlin equation

    public TerrainGen() {
        for(int t = 0; t < layers; t++) {
            for(int z = 0; z < 81; z++) {
                for(int y = 0; y < 16; y++) {
                    for(int x = 0; x < 16; x++) {
                        noise[x][y][z][t] = GenerateNoise();
                    }
                }
            }
        }
        for(int t = 0; t < layers; t++) {
            SmoothNoise(t);
        }
        for(int t = 0; t < layers; t++) {
            for(int z = 0; z < 81; z++) {
                for(int y = 0; y < 16; y++) {
                    for(int x = 0; x < 16; x++) {
                        octaves[x][y][z][t] = InterpolateNoise(interpolate1, interpolate2, noise[x][y][z][t]);
                    }
                }
            }
        }
        for(int t = 0; t < layers; t++) {
            PerlinNoise(t);
        }
    }

    public static Random generation = new Random(5);

    public float GenerateNoise() {
        float i = generation.nextFloat();
        return i;
    }

    public void SmoothNoise(int t) {
        //Huge smoothing algorithm
    }

    //Cosine interpolation
    public int InterpolateNoise(int base, int top, float input) {
        return (int) ((1 - ((1 - Math.cos(input * 3.1415927)) * 0.5)) + top * ((1 - Math.cos(input * 3.1415927)) * 0.5));
    }

    public void PerlinNoise(int t) {
        double f = Math.pow(2.0, new Double(t));
        double a = Math.pow(persistence, new Double(t));
        for(int z = 0; z < 81; z++) {
            for(int y = 0; y < 16; y++) {
                for(int x = 0; x < 16; x++) {
                    perlin[x][y][z][t] = (int) ((octaves[x][y][z][t] * f) * a);
                }
            }
        }
    }

    public static void main(String [] args) {
        JFrame frame = new JFrame();
        frame.setSize(180, 180);
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        TerrainGen test = new TerrainGen();
        frame.add(test);
        frame.setVisible(true);
    }

    public static int size = 5;

    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        int i = 0;
        for(int t = 0; t < 9; t++) {
            for(int z = 0; z < 9; z++) {
                for(int y = 0; y < 16; y++) {
                    for(int x = 0; x < 16; x++) {
                        g.setColor(new Color(perlin[x][y][i][0] * 10, perlin[x][y][i][0] * 10, perlin[x][y][i][0] * 10));
                        g.fillRect((z * (16 * size)) + (x * size), (t * (16 * size)) + (y * size), size, size);
                    }
                }
                i++;
            }
        }
        repaint();
    }
}
And I did not include the smoothing part because that was about 400 lines of code to smooth between chunks.
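One observation about the code above (my note, not from the original post): InterpolateNoise never multiplies by its base parameter, so the first term collapses to (1 - f) instead of base * (1 - f), and the cast to int truncates the blended value. A standard cosine interpolation between two values a and b, kept in floats until the final colour mapping, looks roughly like this:

// Cosine interpolation: f rises smoothly from 0 to 1 as input goes
// from 0 to 1, so the result blends a towards b: a*(1-f) + b*f.
public static float cosineInterpolate(float a, float b, float input) {
    float f = (1 - (float) Math.cos(input * Math.PI)) * 0.5f;
    return a * (1 - f) + b * f;
}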
What the article calls persistence is how the amplitude of the higher-frequency noises "falls off" when they are combined.
"Octaves" are just what the article calls the noise functions at different frequencies.
You take 1.0 and repeatedly multiply by the persistence to get the list of amplitudes to multiply each octave by, e.g. a persistence of 0.8 gives the factors 1.0, 0.8, 0.64, 0.512.
The noise is not an integer: his function Noise1 produces noise in the range 0..1, i.e. the variable n is an Int32 but it returns a float.
The input parameters are integers, i.e. the Noise1 function is only evaluated at integer coordinates such as (1, 0) or (2, 2).
After smoothing/smearing the noise a bit in SmoothNoise_1, the values get interpolated to produce the values in between.
Hope that helped!!
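To make the "adding" concrete, here is a minimal Java sketch (my own illustration, not from the article) of combining octaves: each octave doubles the frequency, its amplitude is multiplied by the persistence, and the weighted samples really are just summed. The noise() method here is only a stand-in for a real smoothed/interpolated noise source:

import java.util.Random;

public class OctaveDemo {
    static final Random rng = new Random(5);

    // Stand-in for a real smoothed/interpolated noise source in [0, 1).
    static float noise(float x, float y) {
        rng.setSeed((long) (x * 57) + (long) (y * 131));
        return rng.nextFloat();
    }

    // Each octave doubles the frequency; the amplitude falls off by the
    // persistence factor. The weighted octave samples are simply summed.
    static float perlin(float x, float y, int octaves, float persistence) {
        float total = 0;
        float frequency = 1;
        float amplitude = 1;
        for (int o = 0; o < octaves; o++) {
            total += noise(x * frequency, y * frequency) * amplitude;
            frequency *= 2;
            amplitude *= persistence;
        }
        return total;
    }

    public static void main(String[] args) {
        System.out.println(perlin(1.5f, 2.25f, 4, 0.8f));
    }
}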
This loop makes octaves from 2D noise; the same loop would work for 3D Perlin:
function octaves( vtx: Vector3 ): float
{
    var total = 0.0;
    for (var i:int = 1; i < 7; i ++) // num octaves
    {
        total += PerlinNoise(Vector3 (vtx.x*(i*i), 0.0, vtx.z*(i*i)))/(i*i);
    }
    return total; // added multiple perlins into noise with 1/2/4/8 etc ratios
}
The best thing I have seen for learning Perlin is the following code. Instead of hash tables, it uses a sin-based semi-random function. Using 2-3 octaves it becomes high-quality Perlin. The amazing thing is that I ran 30 octaves of this on a realtime landscape and it didn't slow down, whereas I used one Voronoi once and it was slowing. So: amazing code to learn from.
#ifndef __noise_hlsl_
#define __noise_hlsl_

// hash based 3d value noise
// function taken from https://www.shadertoy.com/view/XslGRr
// Created by inigo quilez - iq/2013
// License Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
// ported from GLSL to HLSL

float hash( float n )
{
    return frac(sin(n)*43758.5453);
}

float noise( float3 x )
{
    // The noise function returns a value in the range -1.0f -> 1.0f
    float3 p = floor(x);
    float3 f = frac(x);

    f = f*f*(3.0-2.0*f);
    float n = p.x + p.y*57.0 + 113.0*p.z;

    return lerp(lerp(lerp( hash(n+  0.0), hash(n+  1.0), f.x),
                     lerp( hash(n+ 57.0), hash(n+ 58.0), f.x), f.y),
                lerp(lerp( hash(n+113.0), hash(n+114.0), f.x),
                     lerp( hash(n+170.0), hash(n+171.0), f.x), f.y), f.z);
}

#endif
Note that sin is expensive on the CPU; instead you would use:
function hash ( n: float ): float
{   // random -1, 1
    var e = ( n *73.9543)%1;
    return (e*e*142.05432)%2-1; // fast cpu random by me :) uses e*e rather than sin
}
I'm working on a game which has tank battles on a tiled map. If a tank is on a cell, that cell is considered unpassable in the A* algorithm; therefore, whenever a unit needs to attack another, I need to plan a path which brings the attacker into range (if range = 1, then next to the target).
Currently, I use an iterative approach with an increasing radius to find a path to a nearby cell, choosing a cell which minimizes the A-Cell-B distance. Unfortunately, this is slow for one unit, not to mention for 50 units.
Is there a way to extract a partial path from the data structures of a regular A* search?
Just for reference, here is the implementation I have.
Set<T> closedSet = U.newHashSet();
Map<T, T> cameFrom = U.newHashMap();
final Map<T, Integer> gScore = U.newHashMap();
final Map<T, Integer> hScore = U.newHashMap();
final Map<T, Integer> fScore = U.newHashMap();
final Comparator<T> smallestF = new Comparator<T>() {
    @Override
    public int compare(T o1, T o2) {
        int g1 = fScore.get(o1);
        int g2 = fScore.get(o2);
        return g1 < g2 ? -1 : (g1 > g2 ? 1 : 0);
    }
};
Set<T> openSet2 = U.newHashSet();
List<T> openSet = U.newArrayList();

gScore.put(initial, 0);
hScore.put(initial, estimation.invoke(initial, destination));
fScore.put(initial, gScore.get(initial) + hScore.get(initial));

openSet.add(initial);
openSet2.add(initial);

while (!openSet.isEmpty()) {
    T current = openSet.get(0);
    if (current.equals(destination)) {
        return reconstructPath(cameFrom, destination);
    }
    openSet.remove(0);
    openSet2.remove(current);
    closedSet.add(current);
    for (T loc : neighbors.invoke(current)) {
        if (!closedSet.contains(loc)) {
            int tentativeScore = gScore.get(current)
                    + distance.invoke(current, loc);
            if (!openSet2.contains(loc)) {
                cameFrom.put(loc, current);
                gScore.put(loc, tentativeScore);
                hScore.put(loc, estimation.invoke(loc, destination));
                fScore.put(loc, gScore.get(loc) + hScore.get(loc));
                openSet.add(loc);
                Collections.sort(openSet, smallestF);
                openSet2.add(loc);
            } else
            if (tentativeScore < gScore.get(loc)) {
                cameFrom.put(loc, current);
                gScore.put(loc, tentativeScore);
                hScore.put(loc, estimation.invoke(loc, destination));
                fScore.put(loc, gScore.get(loc) + hScore.get(loc));
                Collections.sort(openSet, smallestF);
            }
        }
    }
}
return Collections.emptyList();
A solution that seems to work (replacing the last return Collections.emptyList();):
// if we get here, there was no direct path available
// find a target location which minimizes initial-L-destination
if (closedSet.isEmpty()) {
    return Pair.of(false, Collections.<T>emptyList());
}
T nearest = Collections.min(closedSet, new Comparator<T>() {
    @Override
    public int compare(T o1, T o2) {
        int d1 = trueDistance.invoke(destination, o1);
        int d2 = trueDistance.invoke(destination, o2);
        int c = U.compare(d1, d2);
        if (c == 0) {
            d1 = trueDistance.invoke(initial, o1);
            d2 = trueDistance.invoke(initial, o2);
            c = U.compare(d1, d2);
        }
        return c;
    }
});
return Pair.of(true, reconstructPath(cameFrom, nearest));
Where trueDistance gives the Euclidean distance between two points. (The base algorithm uses a simpler function yielding 1000 for an X-X or Y-Y neighbor and 1414 for a diagonal XY neighbor.)
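The snippets above call reconstructPath, which is not shown; it is presumably the usual A* back-walk over the cameFrom map. A minimal sketch of what it might look like, assuming the usual java.util imports:

// Walk the cameFrom chain back from the target to the start,
// then reverse it so the path runs start -> target.
private static <T> List<T> reconstructPath(Map<T, T> cameFrom, T target) {
    List<T> path = new ArrayList<T>();
    T current = target;
    while (current != null) {
        path.add(current);
        current = cameFrom.get(current);
    }
    Collections.reverse(path);
    return path;
}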
Given a grid of open spots, and a certain number of tiles to place in those spots, what function f(openSpots, tilesToPlace) will give you the number of continuous paths you can form?
Continuous paths are placements of the tiles such that each tile shares an edge with another. (Only corners touching is not good enough, so (0, 1) and (0, 0) are legal, but (1, 1) and (2, 2) are not.)
I already have a function that will find all these paths. However, it only works for small numbers. For larger values, all I need is a count of how many could possibly exist. Here is some data:
For 1 tiles, there are 1 paths.
For 2 tiles, there are 4 paths.
For 3 tiles, there are 22 paths.
For 4 tiles, there are 89 paths.
For 5 tiles, there are 390 paths.
For 6 tiles, there are 1476 paths.
For 7 tiles, there are 5616 paths.
For 8 tiles, there are 19734 paths.
For 9 tiles, there are 69555 paths.
This gets really slow to calculate as the puzzle size increases. I think the asymptotic complexity of my path finding solution is pretty bad.
If there are n tiles, the grid is at most n spots long and wide.
Your problem seems to be at least as difficult as enumerating polyominoes. There are no known fast algorithms for doing this, and the best known algorithms struggle after n=50. I doubt there is a fast way to solve this problem.
I'm not even going to pretend that this is an optimal solution, but it might be useful as a reference solution. I think it at least gives the correct answer, although it takes some time. It solves the problem recursively by finding all paths of length n-1, then checking all possible places where one more tile can be added, and removing duplicate solutions. It has a particularly ugly part where it checks for duplicates by converting each path to a string and comparing the strings, but it was fast to write.
Here's the output it generates:
n = 1, number of paths found = 1
n = 2, number of paths found = 4
n = 3, number of paths found = 22
n = 4, number of paths found = 113
n = 5, number of paths found = 571
n = 6, number of paths found = 2816
n = 7, number of paths found = 13616
n = 8, number of paths found = 64678
n = 9, number of paths found = 302574
And here's the code:
using System;
using System.Collections.Generic;
using System.Linq;

public struct Tile
{
    public Tile(int x, int y) { X = x; Y = y; }

    public readonly int X;
    public readonly int Y;

    public IEnumerable<Tile> GetNeighbours(int gridSize)
    {
        if (X > 0)
            yield return new Tile(X - 1, Y);
        if (X < gridSize - 1)
            yield return new Tile(X + 1, Y);
        if (Y > 0)
            yield return new Tile(X, Y - 1);
        if (Y < gridSize - 1)
            yield return new Tile(X, Y + 1);
    }

    public override string ToString()
    {
        return string.Format("({0},{1})", X, Y);
    }
}

public class Path
{
    public Path(Tile[] tiles) { Tiles = tiles; }

    public Tile[] Tiles { get; private set; }

    public override string ToString()
    {
        return string.Join("", Tiles.Select(tile => tile.ToString()).ToArray());
    }
}

public class PathFinder
{
    public IEnumerable<Path> FindPaths(int n, int gridSize)
    {
        if (n == 1)
        {
            for (int x = 0; x < gridSize; ++x)
                for (int y = 0; y < gridSize; ++y)
                    yield return new Path(new Tile[] { new Tile(x, y) });
        }
        else
        {
            Dictionary<string, object> pathsSeen = new Dictionary<string, object>();
            foreach (Path shortPath in FindPaths(n - 1, gridSize))
            {
                foreach (Tile tile in shortPath.Tiles)
                {
                    foreach (Tile neighbour in tile.GetNeighbours(gridSize))
                    {
                        // Ignore tiles that are already included in the path.
                        if (shortPath.Tiles.Contains(neighbour))
                            continue;
                        Path newPath = new Path(shortPath.Tiles
                            .Concat(new Tile[] { neighbour })
                            .OrderBy(t => t.X)
                            .ThenBy(t => t.Y)
                            .ToArray());
                        string pathKey = newPath.ToString();
                        if (!pathsSeen.ContainsKey(pathKey))
                        {
                            pathsSeen[pathKey] = null;
                            yield return newPath;
                        }
                    }
                }
            }
        }
    }

    static void Main()
    {
        PathFinder pathFinder = new PathFinder();
        for (int n = 1; n <= 9; ++n)
        {
            List<Path> paths = pathFinder.FindPaths(n, n).ToList();
            Console.WriteLine("n = {0}, number of paths found = {1}", n, paths.Count);
            //foreach (Path path in paths)
            //    Console.WriteLine(path.ToString());
        }
    }
}