zortlord
BAN USERpublic static int numClusters(int[][] board){
if(board == null){
throw new NullPointerException();
}
if(board.length == 0 || board[0].length == 0){
throw new IllegalArgumentException();
}
Worker worker = new Worker(board);
return worker.execute();
}
static class Worker{
int[][] board;
boolean[][] visited;
int xDim;
int yDim;
Worker(int[][] board){
this.board = board;
this.xDim = board.length;
this.yDim = board[0].length;
this.visited = new boolean[this.yDim][this.xDim];
}
int execute(){
int count = 0;
for(int y = 0; y < this.yDim; y++){
for(int x = 0; x < this.xDim; x++){
if(!this.visited[y][x] && this.board[y][x] == 1){
count++;
this.visitCluster(x, y);
}
}
}
return count;
}
void visitCluster(int x, int y){
Stack<int[]> positions = new Stack<int[]>();
positions.add(new int[]{y, x});
while(!positions.isEmpty()){
int[] position = positions.pop();
this.visited[position[0]][position[1]] = true;
positions.addAll(generateMoves(position));
}
}
ArrayList<int[]> getMoves(int[] position){
int xMin = position[1] > 0 ? position[1] -1: position[0];
int xMax = position[1] < this.xDim -1 ? position[1] + 1 : position[1];
int yMin = position[y] > 0 ? position[0] - 1 : position[0];
int yMax = position[0] < this.yDim - 1 ? position[0] + 1 : position[0];
ArrayList<int[]> results = new ArrayList<int[]>();
for(int i = yMin; i < yMax; i++){
for(int j = xMin; j < xMax; j++){
if(!this.visited[i][j] && this.board[i][j] == 1){
results.add(new int[]{i, j});
}
}
}
return results;
}
}
O(n+m) where n is length of list 1 and m is length of list 2.
// Singly-linked list node used by mergeSort below.
static class Node{
    Node next;
    int value; // BUG FIX: was 'Int', which is not a Java type
}

/**
 * Merges two already-sorted linked lists into one sorted list in
 * O(n + m), reusing the existing nodes.
 */
public static Node mergeSort(Node list1, Node list2){
    //add fake root to list
    Node result = new Node();
    // BUG FIX: the original overwrote result.next on every iteration and
    // never advanced, so all merged nodes except the last were dropped.
    Node tail = result;
    while(list1 != null && list2 != null){
        if(list1.value < list2.value){
            tail.next = list1;
            list1 = list1.next;
        }
        else{
            tail.next = list2;
            list2 = list2.next;
        }
        tail = tail.next;
    }
    // attach whichever list still has nodes
    if(list1 != null){
        tail.next = list1;
    }
    else{
        tail.next = list2;
    }
    //remove the fake root
    return result.next;
}
Why not just do a Binary Search for N? Why all the traversal approaches?
//this is the Tree Construction
// Binary-search-tree node used by getClosestValue below.
static class TreeNode{
TreeNode left, right;
int value;
}
/*
* return null if no smaller value could be found
*/
/*
 * Walks the BST looking for the largest stored value that is <= n.
 * Returns n itself on an exact match, and null when every value in the
 * tree is greater than n.
 */
public static Integer getClosestValue(TreeNode node, int n){
    Integer best = null;
    TreeNode current = node;
    while(current != null){
        if(current.value == n){
            // exact hit — nothing can be closer
            return current.value;
        }
        if(current.value < n){
            // candidate answer; something even closer may sit to the right
            best = current.value;
            current = current.right;
        }
        else{
            // too large — only the left subtree can hold values <= n
            current = current.left;
        }
    }
    return best;
}
@Asaf
You are absolutely right about the distance thing. That's what I get for trying to rush.
As for the Overall complexity, I think we are both partially correct- since the PriorityQueue will never have more than k+1 things in it, the complexity incurred by the queue is O(k log k). Therefore, the overall complexity of the solution would be O(n + k log k) and the memory cost would be O(k)
Edited the comment accordingly.
O(n log k) complexity and O(k) memory.
class PointDist{
int dist;
int[] point;
}
public static int[][] findKClosest(int[][] points, int k){
PriorityQueue<PointDist> closest = new PriorityQueue<PointDist>(new Comparator<PointDist>(){
public int compare(PointDist p1, PointDist p2){
return p2.dist - p1.dist;
}
});
for(int[] point : points){
int dist = Math.sqrt(Math.abs(point[0] - 5]) + Math.abs(point[1] -5));
PointDist pointDist = new PointDist();
pointDist.dist = dist;
pointDist.point = point;
closest.add(pointDist);
if(closest.size() > k){
closest.poll();
}
}
int[][] results = new int[closest.size()][];
for(int i = 0; i < results.length; i++){
results[i] = closest.poll().point;
}
return results;
}
Edit: small changes based on Asaf's comments.
- zortlord December 19, 2014
This can be solved once you make a few observations. Given the example tree:
1
/ | \
2 5 7
/ \ | |
3 4 6 8
|
9
pre : 1 2 3 4 5 6 7 8 9
post: 3 4 2 6 5 9 8 7 1
The root for that sequence is always the first in the Pre and the last in the Post -> 1
Everything else should be a child of the specified root. Each of the child branches can be identified by simply seeing where an instance in Pre occurs in Post:
the branch headed by 2 contains the contents:
pre: 2 3 4
post: 3 4 2
we also know that this branch has 2 as its root since 2 is the first and last item. We can then recurse down and see that 2 would have a branch with only 3 and a branch with only 4.
Extrapolating this process, the tree should be constructable in O(n^2) time (I like java so I made this in Java):
// N-ary tree node: a value plus an ordered list of child branches.
class Node{
    int data;
    ArrayList<Node> children = new ArrayList<Node>();
}

/**
 * Reconstructs an n-ary tree from matching pre-order and post-order
 * traversals. O(n^2) due to the linear scans locating each child root.
 */
public static Node buildTree(int[] preOrder, int[] postOrder){
    if(preOrder == null || postOrder == null){
        throw new NullPointerException();
    }
    if(preOrder.length != postOrder.length){
        throw new IllegalArgumentException();
    }
    return buildTree(preOrder, 0, preOrder.length - 1, postOrder, 0, postOrder.length - 1);
}

// Builds the subtree whose pre-order is preOrder[preMin..preMax] and whose
// post-order is postOrder[postMin..postMax]. The subtree root is the first
// pre-order entry / last post-order entry.
private static Node buildTree(int[] preOrder, int preMin, int preMax, int[] postOrder, int postMin, int postMax){
    //construct the root
    Node root = new Node();
    root.data = preOrder[preMin];
    //construct the child branches
    int preIndex = preMin + 1;
    int postIndex = postMin;
    // postMax - 1 excludes this subtree's own root from the child scan
    while(preIndex <= preMax && postIndex <= postMax - 1){
        //preOrder[preIndex] is now the root of the next child branch
        //find where preOrder[preIndex] occurs in postOrder
        int shift = 0;
        while(postOrder[postIndex + shift] != preOrder[preIndex]){
            shift++;
        }
        // BUG FIX: the original passed the stale postMin here, so every
        // child after the first was built from the wrong post-order range.
        Node child = buildTree(preOrder, preIndex, preIndex + shift, postOrder, postIndex, postIndex + shift);
        root.children.add(child);
        shift++;
        preIndex += shift;
        postIndex += shift;
    }
    return root;
}
Part of the slowdown in this code is the frequent lookups of Post values matching a given Pre value. That process could be potentially sped up by using a Map of value to index. And that map could be completed in O(n) time reducing the overall runtime of the application from O(n^2) to O(n).
- zortlord December 19, 2014
This can be done very simply by creating a directed graph of all the letters and then doing a DFS search starting at every position to compute the longest consecutive String. O(n) complexity to create the graph with O(n) space and O(n) complexity to complete the DFS.
// Adjacency lists for an x-by-y grid: connections[i][j] holds the cell
// positions reachable from cell (i, j).
class Graph{
    ArrayList<int[]>[][] connections;

    @SuppressWarnings("unchecked")
    Graph(int x, int y){
        // BUG FIX: 'new ArrayList<int[]>[x][y]' is illegal in Java
        // (generic array creation); create a raw array and cast.
        this.connections = (ArrayList<int[]>[][]) new ArrayList[x][y];
        for(int i = 0; i < x; i++){
            for(int j = 0; j < y; j++){
                this.connections[i][j] = new ArrayList<int[]>();
            }
        }
    }

    // Adds a directed edge from cell 'from' ({x, y}) to cell 'to'.
    void connect(int[] from, int[] to){
        this.connections[from[0]][from[1]].add(to);
    }
}
/**
 * Length of the longest path of alphabetically-consecutive letters in the
 * grid: builds the letter graph, then finds the deepest DFS chain.
 *
 * @throws NullPointerException     when arr is null
 * @throws IllegalArgumentException when arr has no cells
 */
public static int longestConsecutive(char[][] arr){
    if(arr == null){
        throw new NullPointerException();
    }
    boolean empty = arr.length == 0 || arr[0].length == 0;
    if(empty){
        throw new IllegalArgumentException();
    }
    // build graph, then DFS it for the longest chain
    return depth(buildGraph(arr));
}
// Builds the consecutive-letter graph: one node per grid cell, with an
// edge to each neighbor that holds the next letter of the alphabet.
private static Graph buildGraph(char[][] arr){
    int xDim = arr.length;
    int yDim = arr[0].length;
    Graph g = new Graph(xDim, yDim);
    for(int i = 0; i < xDim; i++){
        for(int j = 0; j < yDim; j++){
            makeConnections(g, arr, i, j);
        }
    }
    return g; // BUG FIX: the original never returned the graph
}
// Connects cell (x, y) to every adjacent cell holding the next letter.
// BUG FIX: parameter was declared int[][] but callers pass the char[][]
// grid.
private static void makeConnections(Graph g, char[][] arr, int x, int y){
    int xMin = x > 0 ? x - 1 : x;
    int xMax = x < arr.length - 1 ? x + 1 : x;
    int yMin = y > 0 ? y - 1 : y;
    int yMax = y < arr[0].length - 1 ? y + 1 : y;
    int[] from = new int[]{x, y};
    // offset of the desired successor letter from 'a'
    // BUG FIX: original read arr[x][i] before the loop variable i existed
    int desiredCharInt = ((int) arr[x][y]) - ((int) 'a') + 1;
    for(int i = xMin; i <= xMax; i++){
        for(int j = yMin; j <= yMax; j++){
            if(i != x || j != y){
                if(((int) arr[i][j]) - ((int) 'a') == desiredCharInt){
                    // BUG FIX: the original reused one int[] for every
                    // edge, so all stored edges aliased the same mutated
                    // array; allocate a fresh array per edge.
                    g.connect(from, new int[]{i, j});
                }
            }
        }
    }
}
// Longest consecutive-letter chain in the graph, delegated to Worker.
private static int depth(Graph g){
    Worker w = new Worker(g);
    return w.execute();
}
// Memoized DFS over the letter graph: depth[i][j] caches the length of the
// longest consecutive chain starting at cell (i, j).
static class Worker{
    Graph g;
    int xDim;
    int yDim;
    int[][] depth; // 0 means "not yet computed"

    Worker(Graph g){
        this.g = g;
        this.xDim = g.connections.length;
        this.yDim = g.connections[0].length;
        this.depth = new int[this.xDim][this.yDim];
    }

    // Tries every start cell and returns the maximum chain length.
    int execute(){
        int maxDepth = 0;
        for(int i = 0; i < this.xDim; i++){
            for(int j = 0; j < this.yDim; j++){
                int depth = this.depthRecur(i, j);
                if(depth > maxDepth){
                    maxDepth = depth;
                }
            }
        }
        return maxDepth;
    }

    // Longest chain starting at (x, y), cached in this.depth.
    int depthRecur(int x, int y){
        //if the depth is already known, use that
        if(this.depth[x][y] > 0){
            return this.depth[x][y];
        }
        //otherwise, check it by traversing the graph
        // BUG FIX: element type is int[] (a position), not int[][]
        for(int[] child : this.g.connections[x][y]){
            int tempChildDepth = depthRecur(child[0], child[1]);
            if(tempChildDepth > depth[x][y]){
                this.depth[x][y] = tempChildDepth;
            }
        }
        //add the local node
        this.depth[x][y]++;
        //set the cached value
        return this.depth[x][y];
    }
}
If you consider the division operation to simply be not multiplying with that value, then it could be considered that every index value would be the product of all values to the left and all values to the right:
results[i] = product(arr[0 .. i-1]) * product(arr[i+1 .. arr.length-1])
That observation lends itself well to a recursive function with O(n) complexity and O(n) memory:
public static int[] setValues(int[] arr){
if(arr == null){
throw new NullPointerException();
}
if(arr.length == 0){
throw new IllegalArgumentException();
}
int[] results = new int[arr.length];
setValueRecur(arr, results, 0, 1);
return results;
}
private static int setValueRecur(int[] arr, int[] results, int index, int productToTheLeft){
if(index == results.length){
return 1;
}
int productToTheRight = setValueRecur(arr, results, index+1, productToTheLeft* arr[index]);
results[index] = productToTheLeft * productToTheRight;
return productToTheRight * arr[index]);
}
O(k^2 * n)
/**
 * Greedy k-transaction profit estimate: starts from the single best
 * buy/sell range, then repeatedly re-partitions while transactions remain.
 * O(k^2 * n).
 *
 * @throws NullPointerException     when arr is null
 * @throws IllegalArgumentException when k or initialSum is < 1
 */
public static int maxProfit(int[] arr, int k, int initialSum){
    if(arr == null){
        throw new NullPointerException();
    }
    if(k < 1 || initialSum < 1){
        throw new IllegalArgumentException();
    }
    //Create the base Case
    Partition base = findBest(arr, 0, arr.length - 1);
    if(base == null){
        return 0; // BUG FIX: 'return null' does not compile for an int return
    }
    ArrayList<Partition> partitions = new ArrayList<Partition>();
    if(base.start > 0){
        partitions.add(new Partition(0, base.start, false));
    }
    partitions.add(base);
    if(base.end < arr.length - 1){
        // BUG FIX: original indexed the Partition like an array (base[1])
        partitions.add(new Partition(base.end, arr.length - 1, false));
    }
    boolean keepWorking = true;
    k--;
    while(keepWorking && k > 0){
        keepWorking = computePartition(arr, partitions);
        k--;
    }
    return computeValue(arr, partitions, initialSum);
}
// A contiguous index range [start, end] of the price array; 'counts'
// marks ranges that represent an actual buy/sell transaction (see
// computeValue, which only applies counting partitions).
static class Partition{
int start, end;
boolean counts;
Partition(int start, int end, boolean counts){
this.start = start;
this.end = end;
this.counts = counts;
}
}
private static Partition findBest(int[] arr, int start, int end){
//find the lowest value and the highest values
int lowestVal = Integer.MAX_VALUE;
int lowestIndex = -1;
int highestVal = Integer.MIN_VALUE;
int highestIndex = -1;
for(int i = start; i <= end; i++){
int val = arr[i];
if(val > lowestVal){
lowestVal = val;
lowestIndex = i;
if(val > highestVal){
highestVal = val;
highestIndex = i;
}
}
//if the lowest index is before the highest, return that
if(lowestIndex < highestIndex){
return new Partition(lowestIndex, highestIndex, true);
}
if(lowestIndex == end && highestndex == start){
return null;
}
//find the lowest index before the Highest
Partition lower = findBest(arr, start, lowestIndex);
int lowerScore = Integer.MIN_VALUE;
if(lower != null){
lowerScore = computeValue(arr, Collections.asList(lower), 1000);
}
//find the highest after the lowest
Partition higher = findBest(arr, highestIndex, end);
int higherScore = Integer.MIN_VALUE;
if(higher != null){
higherScore = computeValue(arr, Collections.asList(higher), 1000);
}
//return the partition with the largest difference
if(lowerScore > higherScore){
return lower;
}
return higher;
}
private static Partition findWorst(int[] arr, int start, int end){
//find the lowest value and the highest values
int lowestVal = Integer.MAX_VALUE;
int lowestIndex = -1;
int highestVal = Integer.MIN_VALUE;
int highestIndex = -1;
for(int i = start; i <= end; i++){
int val = arr[i];
if(val > lowestVal){
lowestVal = val;
lowestIndex = i;
if(val > highestVal){
highestVal = val;
highestIndex = i;
}
}
//if the lowest index is after the highest, return that
if(lowestIndex > highestIndex){
return new Partition(lowestIndex, highestIndex, false);
}
if(highestIndex == end && lowestIndex == start){
return null;
}
//find the Highest index before the lowest
Partition lower = findWorst(arr, start, lowestIndex);
int lowerScore = Integer.MAX_VALUE;
if(lower != null){
lowerScore = computeValue(arr, Collections.asList(lower), 1000);
}
//find the Lowest after the Highest
Partition higher = findWorst(arr, highestIndex, end);
int higherScore = Integer.MAX_VALUE;
if(higher != null){
higherScore = computeValue(arr, Collections.asList(higher), 1000);
}
//return the partition with the largest difference
if(lowerScore < higherScore){
return lower;
}
return higher;
}
private static boolean computePartition(int[] arr, ArrayList<Partition> partitions){
int changeLocation = -1;
ArrayList<Partition> changes = null;
int changeScore = 0;
for(int i = 0; i < partition.size(); i++){
Partition partition : partitions.get(i);
ArrayList<Partition> testChanges = new ArrayList<Partition>();
if(partition.counts){
Partition testWorst = findWorst(arr, partition.start, partition.end);
if(testWorst == null){
continue;
}
if(testWorst.start-1 > partition.start){
testChanges.add(new Partition(partition.start, testWorst.start-1, true);
}
testChanges.add(testWorst);
if(testWorst.end+1 < partition.end){
testChanges.add(new Partition(testWorst.end+1, partition.end, true);
}
}
else{
Partition testBest = findBest(arr, partition.start, partition.end);
if(testBest == null){
continue;
}
if(testBest.start-1 > partition.start){
testChanges.add(new Partition(partition.start, testWorst.start-1, false);
}
testChanges.add(testBest);
if(testBest.end+1 < partition.end){
testChanges.add(new Partition(testBest.end+1, partition.end, false);
}
}
int testScore = computeValue(arr, testChanges, 1000);
if(testScore > changeScore){
changes = testChanges;
changeScore = testScore;
changeLocation = i;
}
}
if(changeLocation > -1){
partitions.remove(changeLocation);
for(int i = 0; i < changes.size(); i++){
partitions.add(changes.get(i), changeLocation + i);
}
return true;
}
return false;
}
// Simulates buying at each counting partition's start and selling at its
// end, starting from initialSum; returns the net gain over initialSum.
private static int computeValue(int[] arr, ArrayList<Partition> partitions, int initialSum){
    int total = initialSum;
    for(Partition partition : partitions){
        if(partition.counts){
            total /= arr[partition.start]; // buy as many units as possible
            total *= arr[partition.end];   // sell them all
        }
    }
    return total - initialSum; // BUG FIX: 'intialSum' typo
}
I'm ignoring the input reading part because that's not very interesting to me. If this were an in-person interview, my experience is that they would not care about this very much. If I had to implement it, I would use some sort of stream parsing, but there wouldn't be much logic.
In writing the actual algorithm that determines the number of votes that could be satisfied, the logic is actually quite simple: Count the maximum number of voters that vote for each pet to be eliminated. Since each voter can only vote for 1 pet to be eliminated and each voter cannot vote to keep the same pet that they eliminate, those votes would be immediately satisfied. Bucket sort the votes on what they would eliminate and count the largest bucket. O(v + (c + d)) where v is the number of votes, c is the number of cats, and d is the number of dogs.
static class Vote{
boolean eliminateIsCat;
int eliminateIndex;
boolean keepIsCat;
int keepIndex;
}
//inputs are parsed into a Collection of Votes
public static int maxSatisfied(Collection<Vote> votes, int numCats, int numDogs){
int[] elimCats = new int[numCats];
int[] elimDogs = new int[numDogs];
for(Vote vote : votes){
if(vote.elimateCat){
elimCats[vote.eliminateIndex]++;
}
else{
elimDogs[vote.eliminatedIndex]++;
}
int maxElim = 0;
for(int i =0; i < numCats; i++){
if(elimCats[i] > maxElim){
maxElim = elimCats[i];
}
}
for(int i = 0; i < numDogs; i++){
if(elimDogs[i] > maxElim){
maxElim = elimDogs[i];
}
}
return maxElim;
}
Used this as the example Tree:
20
/ \
8 22
/ \ / \
5 3 4 25
/ \
10 14
The map idea is a great one. In java, should use the LinkedHashMap to ensure O(n) retrieval of the columns:
// Binary tree node for the vertical (column-wise) traversal below.
static class TreeNode{
TreeNode left, right;
int val;
}
/**
 * Prints the tree column by column (vertical traversal). LinkedHashMap
 * preserves the order in which columns are first discovered by the
 * in-order walk.
 */
public static void verticalTraverse(TreeNode node){
    if(node == null){
        return;
    }
    LinkedHashMap<Integer, ArrayList<TreeNode>> map = new LinkedHashMap<Integer, ArrayList<TreeNode>>();
    int rightCount = 0; // column index of the root
    verticalTraverseRecur(node, map, rightCount);
    StringBuilder builder = new StringBuilder();
    // BUG FIX: loop used the unrelated type 'Node'; elements are TreeNode
    for(ArrayList<TreeNode> column : map.values()){
        for(TreeNode containedNode : column){
            builder.append(containedNode.val);
            builder.append(',');
            builder.append(' ');
        }
        builder.append('\n');
    }
    // BUG FIX: the output was built but never printed
    System.out.print(builder);
}
// In-order walk that appends each node to its column list; rightCount is
// the column index (negative = left of the root).
private static void verticalTraverseRecur(TreeNode node, Map<Integer, ArrayList<TreeNode>> map, int rightCount){
    //in order traversal to ensure the correct output
    if(node.left != null){
        verticalTraverseRecur(node.left, map, rightCount - 1);
    }
    //add this position
    ArrayList<TreeNode> column = map.get(rightCount);
    if(column == null){
        column = new ArrayList<TreeNode>();
        map.put(rightCount, column);
    }
    column.add(node);
    //go right
    if(node.right != null){
        // BUG FIX: was misspelled 'verticalTraversRecur'
        verticalTraverseRecur(node.right, map, rightCount + 1);
    }
}
This can be solved as a sort of hill climbing search algorithm. This approach can be simplified by obtaining an optimal solution within the overall problem by slowly increasing the problem size. Ultimately, however, this approach will take O (n ^2 * m) where n is the size of the array and m is the sum of the values of the array).
/**
 * Minimum number of single-unit moves (per the hill-climbing Worker) so
 * that adjacent elements of arr differ by at most k.
 *
 * @throws NullPointerException     when arr is null
 * @throws IllegalArgumentException when arr has fewer than 2 elements or k < 1
 */
public static int getNumMoves(int[] arr, int k){
    if(arr == null){
        throw new NullPointerException();
    }
    boolean badArgs = arr.length < 2 || k < 1;
    if(badArgs){
        throw new IllegalArgumentException();
    }
    Worker w = new Worker(arr, k);
    return w.execute();
}
static class Worker{
int[] arr;
int[] workingArr;
int maxDelta;
Worker(int[] arr, int maxDelta){
this.arr = arr;
this.maxDelta = maxDelta;
this.workingArr = Arrays.copy(arr, 0, arr.length);
}
int execute(){
int count = 0;
for(int i = 1; i < this.arr.length - 1; i++){
count += executeLayer(i);
}
}
int executeLayer(int index){
int diff = workingArr[index] - workingArr[index-1];
int diffRange = Math.abs(diff);
int count = 0;
while(diffRange > k){
//get and set the best next move
this.setMove(this.getBestMove(index, diff > 0))
count++;
diff = workingArr[index] - workingArr[index-1];
diffRange = Math.abs(diff);
}
return count;
}
void setMove(int[] newWorkingArr){
this.workingArr = newWorkingArr;
}
int[] getBestMove(int index, boolean decrease){
int value = -1;
if(decrease){
value = 1;
}
int bestScore = Integer.MAX_VALUE;
int[] bestMove = null;
for(int[] move : generateMoves(index, value)){
int moveScore = getScore(index, move);
if(moveScore < bestScore){
bestMove = move;
}
}
return bestMove;
}
Collection<int[]> generateMoves(int index, int value){
ArrayList<int[]> results = new ArrayList<int[]>();
for(int i = 0; i < index; i++){
int[] newMove = Arrays.copy(this.workingArr, 0, this.workingArr.length);
newMove[index] -= value;
newMove[i] += value;
if(isValid(index, newMove)){
results.add(newMove);
}
}
return results;
}
boolean isValid(int index, int[] arr){
for(int i = 1; i < index; i++){
if(Math.abs(arr[i-1] - arr[i]) > this.k){
return false;
}
}
return true;
}
int getScore(int index, int[] testMove){
int score = 0;
for(int i = 0; i < index; i++){
score += Math.abs(this.workingArr[i] - this.arr[i]);
}
return score;
}
}
// Binary tree node holding a single character, for the level-order print
// below.
static class TreeNode{
TreeNode left, right;
char c;
}
public static void printTree(TreeNode node){
if(node == null){
return;
}
Queue<TreeNode> queue = new Queue<TreeNode>(1);
Quere<TreeNode> tempQueue = new Queue<TreeNode>();
queue.add(node);
StringBuffer buffer = new StringBuffer();
while(queue.size() > 0){
buffer.setLength(0);
while(queue.peek() != null){
TreeNode workingNode = queue.poll)(;
TreeNode childNode = workingNode.left;
if(childNode != null){
tempQueue.add(childNode);
}
childNode = workingNode.right;
if(childNode != null){
tempQueue.add(childNode);
}
buffer.add(workingNode.c);
}
System.out.println(buffer.toString());
Queue tempPointer = queue;
queue = tempQueue;
tempQueue = tempPointer;
}
}
Either BFS or A* would work for this.
Implementing A*.
f(x) = g(x) + h(x) where
g(x) = number of moves completed so far
h(x) = half the cardinal distance between checked position and the desired end
//assuming this is the method that generates valid moves
public static Collection<int[]> getNextMoves(int[] position);
// One A* search node: a board position, a back-pointer for path
// reconstruction, and the g (moves so far) / h (heuristic) costs.
static class Move{
int[] position;
Move lastMove;
int gCost;
int hCost;
Move(int[] position, Move lastMove, int gCost, int hCost){
this.position = position;
this.lastMove = lastMove;
this.gCost = gCost;
this.hCost = hCost;
}
}
public static List<int[]> getPath(int[] start, int[] end){
if(start == null || end == null){
throw new NullPointerException();
}
if(start.length != 2 || end.length != 2){
throw new IllegalArgumentException();
}
PriorityQueue<Move> moveList = new PriorityQueue<Move>(
new Comparator<Move>(){
public int compare(Move m1, Move, m2){
int fM1 = m1.gCost + m1.hCost;
int fM2 = m2.gCost + m2.hCost;
return fM1-fM2;
}
});
moveList.add(new Move(start, null, 0, 0));
Move solution = null;
while(!moveList.isEmpty()){
Move thisMove = moveList.poll();
int[] thisMovePos = thisMove.position;
if(thisMovePos[0] == end[0] && thisMovePos[1] == end[1]){
solution = thisMove;
break;
}
for(int[] nextPos : getNextMoves(thisMovePos)){
moveList.add(new Move(nextPos, thisMove, thisMove.gCost+1, computeHCost(nextPos, end) );
}
}
if(solution != null){
LinkedList<int[]> results = new LinkedList<int[]>();
while(solution != null){
results.addFront(solution.position);
solution = solution.lastMove;
}
return results;
}
return null;
}
// Admissible A* heuristic: half the Manhattan distance between the
// two positions.
private static int computeHCost(int[] pos1, int[] pos2){
    int dx = Math.abs(pos1[0] - pos2[0]);
    int dy = Math.abs(pos1[1] - pos2[1]);
    return (dx + dy) >>> 1;
}
Thinking about this, I could probably make the code generate the fib numbers at the same time as the computation and finish it simpler in O(Fib n)...
/**
 * Greedy (Zeckendorf-style) count of Fibonacci numbers summing to k:
 * generates the Fibonacci numbers up to k, then consumes k from the
 * largest down.
 */
public static int countFibFactor(int k){
    ArrayList<Integer> fibs = new ArrayList<Integer>();
    //build the fibs
    fibs.add(0);
    int fib = 1;
    // BUG FIX: 'fib < k' excluded k itself when k is a Fibonacci number,
    // e.g. countFibFactor(5) answered 2 instead of 1
    while(fib <= k){
        fibs.add(fib);
        fib = fibs.get(fibs.size() - 1) + fibs.get(fibs.size() - 2);
    }
    //greedily remove values from the fib computation
    int count = 0;
    for(int i = fibs.size() - 1; i > -1; i--){
        int num = fibs.get(i);
        // BUG FIX: the leading 0 entry satisfied 'k >= num' and inflated
        // the count; skip it
        if(num > 0 && k >= num){
            k -= num;
            count++;
        }
        if(k == 0){
            break;
        }
    }
    return count;
}
Pseudo code:
_1. Generate all the Fibs that are less than or equal to the number
_2. Obtain the numbers that would be needed and count them
___1. use BST to find the largest number in the FIB that is smaller than or equal to the actual number
___2. subtract the BST result from the number
___3. if the new number is 0 return the number of times this repeated otherwise repeat at substep 1
The complexity of this approach should approximate (not sure how to make the exact logarithmic computation due not being able to convert the growth rate of Fib as compared to n so I'm replacing Log with Fib to represent a logarithmic-like Fib growth rate) O( Fib n * Log (Fib n) ) time ( it will take O( Fib n) to compute the Fib values present and O( Fib n * Log (Fib n) ) to execute the repeated BST searches) and it will take O( Fib n) memory):
public static int numFibs(int k){
ArrayList<Integer> fibs = computeFibs(k);
return searches(k, fibs);
}
private static ArrayList<Integer> computeFibs(k){
ArrayList<Integer> results = new ArrayList<Integer>();
if(k == 0){
return results;
}
results.add(1);
if(k == 1){
return results;
}
results.add(1);
int val = -1;
while( (val = results.get(results.size() - 1) + results.get(results.size() - 2) ) <= k){
results.add(val);
}
return results;
}
private static int searches(int k, int[] fibs){
int count = 1;
k -= fibs[fibs.length -1];
int maxIndex = fibs.length - 2;
while(k > 0){
int newIndex = bst(k, fibs, maxIndex);
k-= fibs[newIndex];
count++;
maxIndex = newIndex;
}
return count;
}
private static int bst(int k, int[] arr, int maxIndex){
if(maxIndex == 0){
return 0;
}
int lo = 0;
int hi = arr.length -2;
while(lo < hi){
int mid = ( lo + hi ) >>> 1;
if(arr[mid] > k ){
hi = mid -1;
}
else if(arr[mid+1] < k ){
lo = mid + 1;
}
else{
return mid;
}
}
if(lo == maxIndex){
return lo;
}
else if(arr[lo + 1] < k){
return lo + 1;
}
else{
return lo;
}
}
@Vikas:
Barring a few unrelated semantic errors, the code operates correctly. I've run the code with the input 'a bbbbb c ddddd' and it correctly returned 4.
As to those semantic changes:
I changed line 2 from:
return countDelimChars(str, ' ');
to
return countDelimSubstrings(str, ' ');
and
for(char c : str.getChars()){
to
for(char c : str.toCharArray()){
@rajavni-
What I meant by 'problem complexity' is the slowness of the solution. If the dictionary is a Trie, then that might be faster. If the dictionary were a Map<String,Collection<String>> where the key is the translated string, then that solution would be O(1).
Read the string and count the ' ' characters:
/**
 * Counts space-separated substrings (words) in str.
 */
public static int countSpaceDelimSubstrings(String str){
    return countDelimSubstrings(str, ' ');
}

/**
 * Counts the maximal runs of non-delimiter characters in str.
 *
 * @throws NullPointerException when str is null
 */
public static int countDelimSubstrings(String str, char delim){
    if(str == null){
        throw new NullPointerException("\"str\" may not be null");
    }
    int count = 0;
    boolean inToken = false;
    for(int i = 0; i < str.length(); i++){
        char c = str.charAt(i);
        if(c == delim){
            inToken = false;
        }
        else if(!inToken){
            // first character of a new token
            inToken = true;
            count++;
        }
    }
    return count;
}
edits:
Changes some semantic errors that do not affect the logic
'Double checked locking' should only ever be used when declaring the object in question as 'volatile'. If you do not, the runtime may optimize or change the execution order of the branching statements. However, marking something 'volatile' has its own side effects which, without very careful coding, can make the 'volatile' object no longer thread safe.
Additionally, 'volatile' objects are automatically 'synchronized' when certain accesses are done on them. This could effectively force operations on the connection to run in serial as opposed to parallel. This could run counter to the goal of having multiple users use the same connection.
I still maintain that the simple answer is to use initialization on demand with a static loader.
A large part of this problem complexity depends upon the structure of the Dictionary. if the Dictionary is a list of Strings, then the complexity would be dependent upon the length of the list. If the content is a Trie or some other tree like structure, then you'd have to iterate and find possible solutions.
Assuming the dictionary is a List of Strings and you will run this operation once
public static boolean isOverloaded(List<String> dictionary, String word){
char first = word.charAt(0);
char last = word.charAt(word.length() -1);
int totalSize = word.length();
for(String str : dictionary){
if(str.length() == totalSize
&& str.charAt(0) == first
&& str.charAt(str.length() -1) == last
&& !word.equals(str) ){
return true;
)
)
return false;
}
Double checking synchronization has been shown to fail in java without very specific usage of volatile or final.
Consider instead using initialization on demand through a nested static class as the constructor. Since static stuff is forced to operate once and only once and must be completed prior to successful construction, each classloader would be guaranteed to have their own static instance.
/**
 * Initialization-on-demand singleton: LOADER.INSTANCE is created lazily,
 * exactly once, by class initialization — no explicit synchronization
 * needed.
 */
public class Example{
    private Example(){
        //do nothing
    }
    private static class LOADER{
        private static final Example INSTANCE = new Example();
    }
    // BUG FIX: must be static — the constructor is private, so there is no
    // way to obtain a first instance through an instance method
    public static Example getInstance(){
        return LOADER.INSTANCE;
    }
}
Here's a KMP implementation:
/**
 * KMP substring search: true when str2 occurs within str1.
 * O(str1.length + str2.length).
 */
public static boolean isIn(String str1, String str2){
    // BUG FIX: String has no no-arg getChars(); use toCharArray()
    char[] arr1 = str1.toCharArray();
    char[] arr2 = str2.toCharArray();
    if(arr2.length == 0){
        return true; // the empty pattern matches everywhere
    }
    //compute table for str2:
    int[] t = computeTable(arr2);
    int arr1Index = 0, arr2Index = 0;
    while(arr1Index + arr2Index < arr1.length){
        if(arr1[arr1Index + arr2Index] == arr2[arr2Index]){
            arr2Index++;
            if(arr2Index == arr2.length){
                return true;
            }
        }
        else{
            if(t[arr2Index] > -1){
                // restart the pattern at its longest reusable border
                arr1Index += (arr2Index - t[arr2Index]);
                arr2Index = t[arr2Index];
            }
            else{
                arr1Index++;
                arr2Index = 0;
            }
        }
    }
    return false;
}

// KMP failure table: table[i] is the length of the longest proper border
// of arr[0..i-1]; table[0] = -1 by convention.
// BUG FIX: rewritten — the original declared 'i' but not 'm', referenced
// the nonexistent array 't', and was missing a closing brace before its
// 'else', so it did not compile.
private static int[] computeTable(char[] arr){
    int[] table = new int[arr.length];
    table[0] = -1;
    if(arr.length > 1){
        table[1] = 0;
    }
    int pos = 2; // next table slot to fill
    int cnd = 0; // length of the current candidate border
    while(pos < arr.length){
        if(arr[pos - 1] == arr[cnd]){
            cnd++;
            table[pos] = cnd;
            pos++;
        }
        else if(cnd > 0){
            cnd = table[cnd]; // fall back to the next-shorter border
        }
        else{
            table[pos] = 0;
            pos++;
        }
    }
    return table;
}
Here's the cheap and dirty java way but it isn't in the spirit of the question:
public static boolean isIn(String str1, String str2){
if(str1 == null || str2 == null){
throw new NullPointerException();
}
return str1.contains(str2);
}
Here's an implementation that's more in the spirit of the question.
public static boolean isIn(String str1, String str2){
char[] arr1 = str1.getChars();
char[] arr2 = str2.getChars();
outerLoop:
for(int i = 0; i <= arr1.length - arr2.length; i++){
for(int j = 0; j < arr2.length; j++){
if(arr1[i+j] != arr2[i]){
continue outer;
}
}
return true;
}
return false;
}
This is O(n^2). However, there exists a O(n) algorithm using Ukkonen's algorithm to construct a suffix tree for str1 and using that to check str2. However, I've never been able to wrap my head around that algorithm. If someone could implement it here, that would be awesome.
- zortlord December 10, 2014static class Node{
Node left, right;
String name;
}
/**
 * Prints the tree level by level, each level preceded by a numbered
 * banner, using a list-swap breadth-first traversal.
 */
public static void printTree(Node root){
    int level = 1;
    ArrayList<Node> current = new ArrayList<Node>(1);
    current.add(root);
    while(!current.isEmpty()){
        System.out.println("***\n* Level "+level+"\n+***");
        ArrayList<Node> next = new ArrayList<Node>();
        for(Node n : current){
            System.out.println(n.name);
            if(n.left != null){
                next.add(n.left);
            }
            if(n.right != null){
                next.add(n.right);
            }
        }
        current = next;
        level++;
    }
}
With more time than a standard Interview, I would expect a much better algorithm than this:
Pseudo code:
starting function:
create trie from valid strings
for each column i in char[][]
for each row j in char[][]
execute a DFS starting at char[i][j]
}
}
DFS (i, j):
if i or j are invalid indices in char[][]
return
add position i, j to the working array
if working array is a completed word in the trie
add working array to results
if working array is within the trie
set position i, j as having been explored
DFS(i-1, j)
DFS(i, j-1)
DFS(i+1, j)
DFS(i, j+1)
unset position i,j as having been explored
remove position(i, j) from the working array
Complexity for this solution really stinks. And it will operate at roughly O(n^2*m^2) where n and m are the dimensions of the char[][].
Code:
// Trie node: children keyed by character; isWord marks a complete word.
static class TrieNode{
    boolean isWord;
    HashMap<Character, TrieNode> nodes = new HashMap<Character, TrieNode>();
}

/**
 * Finds every dictionary word that can be traced through the grid with
 * up/down/left/right moves, never reusing a cell within one path. A word
 * may appear multiple times if it is reachable from several start cells.
 */
public static ArrayList<String> getWords(char[][] grid, List<String> words){
    TrieNode trie = buildTrie(words);
    ArrayList<String> results = new ArrayList<String>();
    for(int i = 0; i < grid.length; i++){
        for(int j = 0; j < grid[0].length; j++){
            dfs(grid, i, j, trie, results);
        }
    }
    return results;
}

// One DFS rooted at (i, j) with fresh working state.
private static void dfs(char[][] grid, int i, int j, TrieNode trie, ArrayList<String> results){
    ArrayList<Character> working = new ArrayList<Character>();
    boolean[][] charUsed = new boolean[grid.length][grid[0].length];
    dfsRecur(grid, i, j, working, charUsed, trie, results);
}

// Extends the current path by grid[i][j] when the trie allows it, records
// completed words, and recurses to the four neighbors.
private static void dfsRecur(char[][] grid, int i, int j, ArrayList<Character> working, boolean[][] charUsed, TrieNode trie, ArrayList<String> results){
    if(i < 0 || i >= grid.length || j < 0 || j >= grid[0].length || charUsed[i][j]){
        return;
    }
    char c = grid[i][j];
    TrieNode nextNode = trie.nodes.get(c);
    if(nextNode != null){
        working.add(c);
        if(nextNode.isWord){
            char[] wordArr = new char[working.size()];
            // BUG FIX: the loop variable shadowed parameter 'i' (a compile error)
            for(int w = 0; w < wordArr.length; w++){
                wordArr[w] = working.get(w);
            }
            results.add(new String(wordArr));
        }
        charUsed[i][j] = true;
        dfsRecur(grid, i - 1, j, working, charUsed, nextNode, results);
        dfsRecur(grid, i, j - 1, working, charUsed, nextNode, results);
        dfsRecur(grid, i + 1, j, working, charUsed, nextNode, results);
        dfsRecur(grid, i, j + 1, working, charUsed, nextNode, results);
        charUsed[i][j] = false;
        // BUG FIX: the path character was never removed on backtrack, so
        // sibling branches saw a corrupted prefix
        working.remove(working.size() - 1);
    }
}

// Builds the trie containing all dictionary words.
private static TrieNode buildTrie(List<String> words){
    TrieNode root = new TrieNode();
    for(String str : words){
        TrieNode node = root;
        // BUG FIX: original called word.getChars() — 'word' was undefined
        // and String has no such no-arg method
        char[] arr = str.toCharArray();
        for(int i = 0; i < arr.length; i++){
            char c = arr[i];
            TrieNode nextNode = node.nodes.get(c);
            if(nextNode == null){
                nextNode = new TrieNode();
                node.nodes.put(c, nextNode);
            }
            node = nextNode;
        }
        node.isWord = true;
    }
    return root;
}
Convert the dependencies into a Graph and use a Topological Sort. Should be O(m + n) where m is the number of dependencies and n is the number of different modules.
//input dependencies- each char[] is one set of dependencies.
//[0] is the thing that has dependencies
//[1]..[n] are the things that are needed before the build can happen
//input dependencies- each char[] is one set of dependencies.
//[0] is the thing that has dependencies
//[1]..[n] are the things that are needed before the build can happen
public static ArrayList<Character> getBuildOrder(char[][] dependencies){
if(dependencies == null){
throw new NullPointerException();
}
// Build the dependency graph: one edge from the module to each prerequisite.
Graph g = new Graph();
for(char[] dependency : dependencies){
if(dependency == null || dependency.length < 2){
throw new IllegalArgumentException();
}
char dependent = dependency[0];
for(int k = 1; k < dependency.length; k++){
g.addConnection(dependent, dependency[k]);
}
}
return topologicalSort(g);
}
// Node states for the DFS-based topological sort (0 = unvisited).
static final int WORKING = 1;
static final int FINISHED = 2;
/**
 * Topologically sorts all nodes of the graph (dependencies first).
 * Fix: the loop iterated over an undeclared `nodes`; it must read `g.nodes`.
 */
private static ArrayList<Character> topologicalSort(Graph g){
int[] nodeStatus = new int[g.nodes.size()];
ArrayList<Character> results = new ArrayList<Character>();
for(Character c : g.nodes){
Integer index = g.getIndex(c);
topologicalSort(g, nodeStatus, results, index);
}
return results;
}
/**
 * DFS visit for one node: emits all of the node's dependencies before the
 * node itself, and throws on a cycle.
 * Fix: original used `continue` outside any loop (compile error) — the
 * already-finished case must simply return.
 */
private static void topologicalSort(Graph g, int[] nodeStatus, ArrayList<Character> results, Integer index){
if(nodeStatus[index] == FINISHED){
return;
}
// a WORKING node reached again means we looped back onto our own path
if(nodeStatus[index] == WORKING){
throw new IllegalArgumentException("Dependency Cycle Detected");
}
nodeStatus[index] = WORKING;
for(Integer childIndex: g.connections.get(index)){
topologicalSort(g, nodeStatus, results, childIndex);
}
nodeStatus[index] = FINISHED;
results.add(g.nodes.get(index));
}
static class Graph{
HashMap<Character, Integer> nodeToIndexMap = new HashMap<Character, Integer>();
ArrayList<Character> nodes = new ArrayList<Character>();
ArrayList<HashSet<Integer>> connections = new ArrayList<HashSet<Character>>();
void addConnection(Character from, Character to){
Integer fromIndex = this.getIndex(from);
Integer toIndex = this.getIndex(to);
this.connections.get(fromIndex).add(toIndex);
}
Integer getIndex(Character c){
Integer index = this.nodeToIndexMap.get(c);
if(index == null){
index = this.nodes.size();
this.nodeToIndexMap.put(c, index);
this.connections.add(new HashSet<Character>());
this.nodes.add(c);
}
return index;
}
}
My experience with things like this, a service would not keep the internal contents in Date sorted order, so that needs to be addressed too.
Using a mix of QuickSort and MergeSort-like operations.
static interface Service{
UserData[] getUserData();
}
static interface UserData{
long userId;
String info;
long date;
}
public static UserData[] getDateSortedUserData(Service[] services){
UserData results = new UserData[0];
//comparator for Java QuickSort
Comparator<UserData> userDataComparator = new Comparator<UserData>{
final long maxVal = (long)Integer.MAX_VALUE;
final long minVal = (long)Integer.MIN_VALUE;
public int compare(UserData data1, UserData data2){
long diff = data2.date - data1.date;
if(diff > maxVal){
return Integer.MAX_VALUE;
}
if(diff < minVal){
return Integer.MIN_VALUE;
}
return (int)diff;
}
}
for(Service service : services){
UserData[] serviceUserData = service.getUserData();
//Java QuickSort
Arrays.sort(serviceUserData, userDataComparator);
//custom merge sort
results = mergeData(results, serviceUserData);
}
return results.toArray(new UserData[results.size()]);
}
private static void mergeData(UserData[] arr1, UserData[] arr2){
UserData[] results = new UserData[arr1.length + arr2.length];
int resultIndex = 0;
int arr1Index = 0;
int arr2Index = 0;
while(arr1Index < arr1.length && arr2Index < arr2.length){
if(arr1[arr1Index].date < arr2[arr2Index].date){
results[resultIndex] = arr1[arr1Index];
arr1Index++;
else{
results[resultIndex] = arr2[arr2Index];
arr2Index++;
}
resultIndex++;
}
while(arr1Index < arr1.length){
results[resultIndex] = arr1[arr1Index];
resultIndex++;
arr1Index++;
}
while(arr2Index < arr2.length){
results[resultIndex] = arr2[arr2Index];
resultIndex++;
arr2Index++;
}
return results;
}
Put the contents of the smaller list into a HashSet and then iterate over the larger list until you find an object that is contained in the HashSet.
/**
 * Returns the first element of the larger list that also occurs in the
 * smaller list, or null when the lists are disjoint. O(n + m) time.
 * Fix: the method used T without declaring it — added the &lt;T&gt; type
 * parameter.
 */
public static <T> T getFirstCommonElement(List<T> list1, List<T> list2){
List<T> smallList, largeList;
if(list1.size() < list2.size()){
smallList = list1;
largeList = list2;
}
else{
smallList = list2;
largeList = list1;
}
// hash the smaller list, scan the larger one
HashSet<T> set = new HashSet<T>(smallList.size());
set.addAll(smallList);
for(T obj : largeList){
if(set.contains(obj)){
return obj;
}
}
return null;
}
Wouldn't a regular hill climbing algorithm work, especially if it iteratively grows the search space (ie: considers positions 0..1 then 0..2 then 0..3, etc) and the moves are constrained as the moving of 1 between values with only valid moves considered? The cost for each move could simply be
|A[0..i-1] - B[0..i-1]| + (|B[i] - B[i-1]| > maxDelta ? |B[i] - B[i-1]| : 0)
and each iterative search could terminate when
| A[i] - B[i] | <= maxDelta
/**
 * Smooths the array so adjacent values differ by at most maxDelta and
 * returns the total absolute change applied.
 */
public static int smoothArray(int[] arr, int maxDelta){
if(arr == null){
throw new NullPointerException();
}
if(maxDelta < 1){
throw new IllegalArgumentException();
}
// zero or one element: nothing to smooth
if(arr.length < 2){
return 0;
}
Worker worker = new Worker(arr, maxDelta);
worker.execute();
return worker.getResults();
}
static class Worker{
int[] origArr;
int[] workingArr;
int maxDelta;
Worker(int[] arr, int maxDelta){
this.origArr = arr;
this.workingArr = Arrays.copyOf(this.origArr, this.origArr.length);
this.maxDelta = maxDelta;
}
void execute(){
for(int i = 1; i < this.origArr.length - 1; i++){
executeLocal(i);
}
}
void executeLocal(int index){
int diff = this.workingArr[index -1] - this.workingArr[index];
boolean decrement = diff < 0 ? true : false;
while(Math.abs(diff) > maxDelta){
this.workingArr = bestMove(index, decrement);
diff = this.workingArr[index -1] - this.workingArr[index];
}
}
int[] bestMove(int index, boolean decrement){
int[] moveWorkspace = Arrays.copyOf(this.workingArr, this.workingArr.length);
int[] bestMove = null;
int bestCost = Integer.MAX_VALUE;
if(decrement){
moveWorkspace[index] --;
for(int i = 0; i < index; i++){
moveWorkspace[i]++;
if(isValid(moveWorkspace, index-1)){
int cost = getCost(moveWorkspace, index);
cost += getLocalValidCost(moveWorkspace, index);
if(cost < bestMove){
bestMove = Arrays.copyOf(moveWorkspace, moveWorkspace.length);
}
}
moveWorkspace[i]--;
}
}
else{
moveWorkspace[index] ++;
for(int i = 0; i < index; i++){
moveWorkspace[i]--;
if(isValid(moveWorkspace, index-1)){
int cost = getCost(moveWorkspace, index);
cost += getLocalValidCost(moveWorkspace, index);
if(cost < bestMove){
bestMove = Arrays.copyOf(moveWorkspace, moveWorkspace.length);
}
}
moveWorkspace[i]++;
}
}
return bestMove;
}
int getResults(){
return this.getCost(this.workingArr, this.workingArr.length);
}
int getCost(int[] arr, int length){
int sumDiff = 0;
for(int i = 0; i < length; i++){
sumDiff += Math.abs(this.origArr[i] - arr[i]);
}
return sumDiff;
}
int getLocalValidCost(int[] arr, int index){
//( |B[i] - B[i-1]| > maxDelta ? | B[i] - B[i-1] | : 0 )
int diff = Math.abs(arr[index] - arr[index -1]);
return diff > this.maxDelta ? diff : 0;
}
boolean isValid(int[] arr, int length){
for(int i = 1; i < length; i++){
if(Math.abs(arr[i-1] - arr[i]) > this.maxDelta){
return false;
}
}
return true;
}
}
Assuming that the question allows numbers to be used multiple times
You could attempt a brute force search. This search would ultimately be very, very slow because you'd have to effectively decide whether a given number should be in the sum or not. This can also be sped up by using caching of previously computed summations.
public static boolean canBeSummed(int[] arr, int k){
if(arr == null){
throw new NullPointerException();
}
if(k < 0){
throw new IllegalArgumentException();
}
HashSet<Integer> cache = new HashSet<Integer>();
Stack<Integer> stack = new Stack<Integer>();
stack.put(0);
cache.add(0);
while(!stack.isEmpty()){
int val = stack.pop();
if(this.cache.contains(k - val){
return true;
}
else{
for(Integer i : cache){
int sum = val + i;
if(sum < this.k){
if(!this.cache.contains(sum)){
this.cache.add(sum);
stack.add(sum);
}
}
}
}
}
return false;
}
If each number can only be used once, then the computation could change significantly. I haven't implemented caching here, but it could potentially be implemented. Complexity is O(2^n):
/**
 * Returns true when k can be formed by summing array elements, each used at
 * most once. Delegates the exponential search to a Worker.
 */
public static boolean canBeSummed(int[] arr, int k){
if(arr == null){
throw new NullPointerException();
}
if(k < 0){
throw new IllegalArgumentException();
}
return new Worker(arr, k).execute();
}
static class Worker{
int[] arr;
int k;
int index;
int sum;
Worker(int[] arr, int k){
this.arr = arr;
this.k = k;
this.index = 0;
this.sum = 0;
}
boolean execute();
return executeRecur(0, 0);
}
boolean executeRecur(){
if(this.sum == k){
return true;
}
if(this.index == this.arr.length){
return false;
}
int val = this.arr[this.index];
this.index++;
if(executeRecur()){
return true;
}
this.sum += val;
if(executeRecur()){
return true;
}
this.sum -= val;
this.index--;
return false;
}
}
Doesn't list.get(i) take i iterations to find the element and therefore operate in O(n^2) ? With a similar implementation you could do this:
/**
 * Returns a new list with each adjacent pair swapped: [a,b,c] -> [b,a,c].
 * O(n) regardless of the input list implementation.
 * Fixes: java.util.List has no removeFirst() (compile error), and the
 * original consumed/destroyed its input; an Iterator walks the list without
 * mutating it.
 */
public static List<String> reverseAlternate(List<String> list){
if(list == null) return null;
LinkedList<String> results = new LinkedList<String>();
Iterator<String> it = list.iterator();
while(it.hasNext()){
String str1 = it.next();
// odd element left over at the end keeps its position
if(!it.hasNext()){
results.add(str1);
break;
}
String str2 = it.next();
results.add(str2);
results.add(str1);
}
return results;
}
If you have a tree that stores the height of each branch, then you can use that information to locate the Kth element by simply traversing to the Kth element in the tree. This would take only O(log n) operations. However, most tree implementations do not store height information so this operation can be done in O(log n + k) time by traversing the tree to the smallest element and then navigating the tree until you find the k th item. I've implemented the second approach below:
// Binary tree node; `left`/`right` are presumably BST-ordered (smaller
// values left) given the k-th-smallest traversal below — TODO confirm.
static class Node<T>{
Node<T> left, right;
T value;
}
/**
 * Returns the value of the k-th smallest element (1-based) of the tree via
 * in-order traversal, or null when the tree holds fewer than k elements.
 * Fix: the method used T without declaring it — added the &lt;T&gt; type
 * parameter.
 */
public static <T> Object getKthSmallest(Node<T> tree, int k){
if(tree == null){
throw new NullPointerException();
}
if(k < 1){
throw new IllegalArgumentException();
}
Object[] results = getKthSmallestRecur(tree, k);
return results[1]; // null when fewer than k elements exist
}
/**
 * In-order search returning {count of elements visited, value-or-null}.
 * Fixes: the original duplicated the public method's name and signature (a
 * compile error — and the public entry point already calls
 * getKthSmallestRecur); it used T without declaring it; and
 * `found += tracker[0]` needs an (Integer) unbox from the Object[].
 */
private static <T> Object[] getKthSmallestRecur(Node<T> node, int k){
Object[] tracker;
int found = 0;
//consider left
if(node.left != null){
tracker = getKthSmallestRecur(node.left, k);
if(tracker[1] != null){
return tracker;
}
found += (Integer) tracker[0];
}
//consider self
found++;
if(found == k){
return new Object[]{found, node.value};
}
//consider right, looking for the (k - found)-th of that subtree
if(node.right != null){
tracker = getKthSmallestRecur(node.right, k-found);
if(tracker[1] != null){
return tracker;
}
found += (Integer) tracker[0];
}
return new Object[]{found, null};
}
I can think of two ways to do this:
1. read two elements of the list at one time and swap them
2. Read the entire list into two separate lists, even elements to one and odds to the other, and then rebuild the lists
both approaches are O(n) and would roughly use the same memory.
Here's approach 1:
//definition of the list:
static class Node<T>{
Node<T> nextNode
T value;
}
/**
 * Swaps every adjacent pair of nodes in place: a->b->c->d becomes
 * b->a->d->c; a trailing odd node stays put. Returns the new head.
 * Fix: the method used T without declaring it — added &lt;T&gt;.
 */
public static <T> Node<T> swapAlternates(Node<T> inputList){
if(inputList == null || inputList.nextNode == null){
return inputList;
}
Node<T> head = inputList.nextNode; // pointer to new front
Node<T> endLastSection = null; //don't have one yet
Node<T> node1 = inputList; //first node to swap
Node<T> node2 = inputList.nextNode; //second node to swap
Node<T> end = node2.nextNode; // end of the section for the swapping
// swap the first pair by hand, then walk pair by pair
node2.nextNode = node1;
node1.nextNode = end;
endLastSection = node1;
while(endLastSection != null){
node1 = endLastSection.nextNode;
if(node1 == null){
break;
}
node2 = node1.nextNode;
if(node2 == null){
break;
}
end = node2.nextNode;
endLastSection.nextNode = node2;
node2.nextNode = node1;
node1.nextNode = end;
endLastSection = node1;
}
return head;
}
Here's the other approach:
/**
 * Pair-swap via split-and-interleave: splits the list into odd-position and
 * even-position sublists, then re-interleaves starting with the even one,
 * producing b,a,d,c,... Returns the new head.
 * Fixes: locals were read before definite assignment (compile error); the
 * method used T without declaring it; the rebuild loop never advanced its
 * output pointer; and a list of length < 2 caused an NPE.
 */
public static <T> Node<T> swapAlternates(Node<T> inputList){
if(inputList == null || inputList.nextNode == null){
return inputList;
}
// split into two lists: list1 = 1st,3rd,..., list2 = 2nd,4th,...
Node<T> list1Head = null, list1End = null;
Node<T> list2Head = null, list2End = null;
boolean toList2 = false;
while(inputList != null){
Node<T> next = inputList.nextNode;
inputList.nextNode = null;
if(toList2){
if(list2Head == null){
list2Head = inputList;
}
else{
list2End.nextNode = inputList;
}
list2End = inputList;
}
else{
if(list1Head == null){
list1Head = inputList;
}
else{
list1End.nextNode = inputList;
}
list1End = inputList;
}
toList2 = !toList2;
inputList = next;
}
//rebuild the list for output, starting with the original 2nd element
Node<T> head = list2Head;
Node<T> tail = list2Head;
Node<T> n1 = list1Head;
Node<T> n2 = list2Head.nextNode;
boolean takeFrom2 = false;
while(n1 != null && n2 != null){
if(takeFrom2){
tail.nextNode = n2;
n2 = n2.nextNode;
}
else{
tail.nextNode = n1;
n1 = n1.nextNode;
}
tail = tail.nextNode;
takeFrom2 = !takeFrom2;
}
// drain whichever side remains
while(n1 != null){
tail.nextNode = n1;
tail = tail.nextNode;
n1 = n1.nextNode;
}
while(n2 != null){
tail.nextNode = n2;
tail = tail.nextNode;
n2 = n2.nextNode;
}
return head;
}
Assuming the meetings sets are passed in as int[][], then the problem can be solved in O(m+n) by simply merging both lists of events and scanning the merged list of occupied times. Merging works because we are searching for places in which nothing is scheduled. If even one of the individuals have something scheduled, then there isn't free time.
pseudo code:
list = merge both lists by sorting on the start of the occupied time (O (m + n) )
workingEnd = 1 (assuming that the calendar starts with 1)
for each event i = 0 .. n
if list[i] start <= working end
if list[i] end >= workingEnd
workingEnd = list[i] end + 1
else
add new event to results with start = workingEnd and end = event[i] start -1
workingEnd = event[i] end + 1
return results
/**
 * Returns the gaps shared by both calendars as {start, end} pairs,
 * inclusive, assuming the calendar begins at time 1. O(m + n).
 * Fixes: merge was called with person1 twice (person2 ignored); a free
 * block's end is the next event's start minus one (event[0]-1, not
 * event[1]-1); and the untyped toArray() cannot produce an int[][].
 */
public static int[][] getFreeTimes(int[][] person1, int[][] person2){
//merge both people's events, sorted by start time
int[][] list = merge(person1, person2);
ArrayList<int[]> results = new ArrayList<int[]>();
int workingEnd = 1; // earliest instant that might still be free
for(int[] event : list){
if(event[0] <= workingEnd){
// event overlaps the frontier: push the frontier past it
if(event[1] >= workingEnd){
workingEnd = event[1]+1;
}
}
else{
// gap before this event is free for both people
results.add(new int[]{workingEnd, event[0]-1});
workingEnd = event[1]+1;
}
}
return results.toArray(new int[results.size()][]);
}
// Merges two start-sorted event arrays into one start-sorted array.
private static int[][] merge(int[][] arr1, int[][] arr2){
int i = 0;
int j = 0;
int[][] results = new int[arr1.length + arr2.length][];
int k = 0;
while(i < arr1.length && j < arr2.length){
if(arr1[i][0] <= arr2[j][0]){
results[k] = arr1[i];
i++;
}
else{
results[k] = arr2[j];
j++;
}
k++;
}
for(; i < arr1.length; i++){
results[k] = arr1[i];
k++;
}
for(; j < arr2.length; j++){
results[k] = arr2[j];
k++;
}
return results;
}
Create a DAG relating the letters to each other and create a topologically sorted ordering from the DAG to get a suitable alphabet. This will unfortunately be O(n*m + p*p) where n is the number of words, m is the number of characters in each word, and p is the number of characters in the alphabet. It will also take O(p*p) memory:
/**
 * Derives a consistent alphabet ordering from a lexicographically sorted
 * word list via topological sort.
 * Fix: no-argument toArray() returns Object[], which cannot be returned as
 * Character[]; pass a typed array.
 * NOTE(review): generateGraph is an instance method invoked from this static
 * context in the original — the pair should be aligned; TODO confirm.
 */
public static Character[] getCharOrder(String[] sortedWords){
if(sortedWords == null){
throw new NullPointerException();
}
Graph<Character> g = generateGraph(sortedWords);
ArrayList<Character> ordering = g.getTopologicalSort();
return ordering.toArray(new Character[0]);
}
//Operates only linearly to generate connections ( O(n) time ).
//Could be changed to build connections between every word but that would be
//much slower (O(n^2) time).
/**
 * Adds one edge per adjacent word pair, from the earlier letter's successor
 * to it (the direction the topological sort below expects).
 * Fixes: String has no no-arg getChars() — toCharArray(); `difference[o]`
 * used letter o instead of zero; and the declared Graph return was missing.
 */
private Graph<Character> generateGraph(String[] arr){
Graph<Character> g = new Graph<Character>();
for(int i = 1; i < arr.length; i++){
char[] word1 = arr[i-1].toCharArray();
char[] word2 = arr[i].toCharArray();
char[] difference = getFirstDifference(word1, word2);
if(difference != null){
g.addNode(difference[0]);
g.addNode(difference[1]);
g.addConnection(difference[1], difference[0]);
}
}
return g;
}
// Returns the first position's differing characters as {fromWord1, fromWord2},
// or null when one word is a prefix of the other (no ordering information).
private char[] getFirstDifference(char[] arr1, char[] arr2){
int limit = Math.min(arr1.length, arr2.length);
for(int pos = 0; pos < limit; pos++){
if(arr1[pos] != arr2[pos]){
return new char[]{arr1[pos], arr2[pos]};
}
}
return null;
}
static class Graph<T>{
HashMap<T, Integer> indexMap = new HashMap<T, Integer>();
ArrayList<T> nodeValue = new ArrayList<T>();
ArrayList<HashSet<Integer>> connections = new ArrayList<HashSet<Integer>>();
void addNode(T val){
Integer index = this.indexMap.get(val);
if(index == null){
this.indexMap.put(val, this.nodeValue.size());
this.nodeValue.add(val);
this.nodeValue.add(new HashSet<Integer>());
}
}
void addConnection(T val1, T val2){
Integer i1 = this.indexMap.get(val1);
Integer i2 = this.indexMap.get(val2);
this.connections.get(i1).add(i2);
}
ArrayList<T> getTopologicalSort(){
TopoWorker<T> worker = new TopoWorker<T>(this);
worker.execute();
return worker.result;
}
static class TopoWorker{
final int CHECKING = 1;
final int COMPLETED = 2;
int[] nodeStatus;
Graph g;
ArrayList<T> result;
TopoWorker(Graph g){
this.g = g;
this.nodeStatus = new int[g.nodeValue.size()];
this.result = new ArrayList<T>(g.nodeValue.size());
}
void execute(){
if(this.nodeStatus.length == 0){
return;
}
for(int i = 0; i < this.nodeStatus.length; i++){
executeRecur(i);
}
}
void executeRecur(int i){
if(this.nodeStatus[i] == CHECKING){
throw new IllegalArgumentException("There is no valid ordering"+
"because cycles exist");
}
if(this.nodeStatus[i] == COMPLETED){
return;
}
this.nodeStatus[i] = CHECKING;
for(Integer connTo : g.connections.get(i)){
executeRecur(connTo);
}
this.nodeStatus[i] = COMPLETED;
this.result.add(g.nodeValue.get(i));
}
}
}
/**
 * Moves all zeroes to the end of the array in place (element order among
 * non-zeroes is not preserved). O(n) time, O(1) space.
 * Fix: the inner scan compared against an undeclared `start`; the bound is
 * `front`.
 */
public static void move0s(int[] arr){
if(arr == null){
throw new NullPointerException();
}
int front = 0;
int end;
//find index of the first non-0 at the end
for(end = arr.length -1; end > -1 && arr[end] == 0; end--);
//while there are unprocessed values
while(front < end){
//if the first value is a 0
if(arr[front] == 0){
//swap the value with the last non-0
arr[front] = arr[end];
arr[end] = 0;
//find the next non-0 from the right
for(; end > front && arr[end] == 0; end--);
}
front++;
}
}
Using a HashSet like you are to prevent revisiting Positions will not work since it will prevent certain valid paths. It will fail on inputs like:
1, 0, 0, 0, 0, 0, 0
0, 2, 0, 2, 0, 2, 0
0, 0, 0, 0, 0, 0, 0
0, 2, 0, 2, 0, 2, 0
0, 0, 0, 0, 0, 0, 0
0, 0, 0, 0, 0, 0, 0
when it should be able to go (0,0) -> (2, 2) -> (4, 0) -> (6, 2) -> (4, 4) -> (2, 2) -> (0, 4)
So, basically figure out how many jumps can be done with given a game of checkers moving a single piece (a 'king'). This is roughly O(n^2) where n is the dimension of the board (8 in this case). More specifically, completing this via DFS using backtracking.
// Maximum supported board dimension.
private static final int DIM = 6;
/**
 * Returns the maximum number of consecutive jumps a king can make from
 * `start` on the given board (0 = empty, 2 = opponent).
 * Fixes: the result of worker.search was dropped though the method returns
 * int; and `>= DIM` rejected the advertised 6x6 board — the intent appears
 * to be dimensions between 3 and DIM inclusive (TODO confirm).
 */
public static int getNumberOfJumps(int[][] board, Position start){
if(board == null){
throw new NullPointerException();
}
if(board.length < 3 || board.length > DIM || board[0].length < 3 || board[0].length > DIM){
throw new IllegalArgumentException();
}
if(start == null){
throw new NullPointerException();
}
if(start.x < 0 || start.x >= DIM || start.y < 0 || start.y >= DIM){
throw new IllegalArgumentException();
}
Worker worker = new Worker(board);
return worker.search(start);
}
// Simple (x, y) board coordinate.
// NOTE(review): as a non-static nested class this cannot be instantiated
// from Worker's static MOVE_DIRECTIONS initializer — it likely needs to be
// declared static; TODO confirm against the enclosing file.
class Position{
int x;
int y;
Position(int x, int y){
this.x = x;
this.y = y;
}
}
/**
 * DFS-with-backtracking searcher for the maximum jump chain.
 * Fix: genValidMoveData assigned no-argument toArray() (Object[]) to a
 * Position[]; a typed array is required.
 */
class Worker{
// The four diagonal jump directions of a king.
private static final Position[] MOVE_DIRECTIONS = new Position[]{
new Position(-1,-1),
new Position(-1,1),
new Position(1,-1),
new Position(1,1)
};
int[][] board;      // 0 = empty square, 2 = opponent piece (per isValidMove)
boolean[][] jumped; // opponents already captured on the current path
Worker(int[][] board){
this.board = board;
this.jumped = new boolean[board.length][board[0].length];
}
// A jump is valid when it lands on an empty in-bounds square and passes
// over an opponent piece.
// NOTE(review): bounds use the global DIM, not the actual board size, so
// boards smaller than DIM x DIM can index out of range — TODO confirm.
private boolean isValidMove(Position opponent, Position end){
//destination within the board
if(end.x < 0 || end.x >= DIM || end.y < 0 || end.y >= DIM)
return false;
//destination is open
if(this.board[end.x][end.y] != 0)
return false;
//jumping over an opponent
if(this.board[opponent.x][opponent.y] != 2)
return false;
return true;
}
// Returns flat (opponent, destination) pairs for every legal jump from start.
private Position[] genValidMoveData(Position start){
ArrayList<Position> working = new ArrayList<Position>();
for(Position direction : MOVE_DIRECTIONS){
Position opponent = new Position(start.x + direction.x, start.y + direction.y);
Position end = new Position(opponent.x+direction.x, opponent.y+direction.y);
if(isValidMove(opponent, end) && !jumped[opponent.x][opponent.y]){
working.add(opponent);
working.add(end);
}
}
return working.toArray(new Position[0]);
}
// Depth-first search: capture, recurse, un-capture; returns the best chain
// length from this square.
int search(Position start){
int max = 0;
Position[] moves = genValidMoveData(start);
for(int i = 0; i < moves.length; i+=2){
Position opponent = moves[i];
Position dest = moves[i+1];
this.jumped[opponent.x][opponent.y] = true;
int val = search(dest)+1;
if(val > max){
max = val;
}
this.jumped[opponent.x][opponent.y] = false;
}
return max;
}
}
A simple BFS should be able to solve this reasonably fast.
public static ArrayList<int[]> getMoves(int[] start, int[] end){
//first eliminate bad input conditions
if(start == null || end == null){
throw new NullPointerException();
}
if(start.length != 2 || start[0] < 0 || start[0] > 7 start[1] < 0 || start[1] > 7 || end.length != 2 ||
end[0] < 0 || end[0] > 7 || end[1] < 0 || end[1] > 7){
throw new IllegalArgumentException();
}
Worker worker = new Worker();
return worker.execute(start, end);
}
//This class does all the BFS work
static class Worker{
//true if the particular space has already been exposed as a viable search
boolean[][] exposed;
Worker(){
this.exposed = new boolean[8][8];
}
//do the actual search. Searches backwards so the results don't have to be reversed
ArrayList<int[]> execute(int[] start, int[] end){
//setup a search queue
exposed[end[0]][end[1]] = true;
Queue<Move> movesToExplore = new Queue<Move>();
movesToExplore.add(new Move(end[0], end[1], null));
Move dest = null;
//do the search
while(!movesToExplore.isEmpty()){
Move move = movesToExplore.poll();
for(Move nextMove : getNextMoves(move)){
if(nextMove.pos[0] == start[0] && nextMove.pos[1] == start[1]){
dest = nextMove;
break;
}
}
}
//since moves store the path backwards, reverse it
ArrayList<int[]> results = new ArrayList<int[]>();
while(dest != null){
results.add(dest.pos);
dest = dest.lastMove;
}
return results;
}
//get all possible un exposed moves from a given move
ArrayList<Move> getNextMoves(Move move){
int x = move.pos[0];
int y = move.pos[1];
ArrayList<Move> results = new ArrayList<Move>();
//if can move left
if(x > 1){
//if can move down
if(y > 0)
results.add(new Move(x -2, y -1, move));
//if can move up
if(y < 7){
results.add(new Move(x-2, y+1, move));
}
//if can move up
if(y < 6){
//if can move left
if(x > 0)
results.add(new Move(x-1, y+2, move));
//if can move right
if(x < 7)
results.add(new Move(x+1, y+2, move));
}
//if can move right
if(x < 6){
//if can move up
if(y < 7)
results.add(new Move(x+2, y+1, move));
//if can move down
if(y > 0
results.add(new Move(x+2, y-1, move));
}
//if can move down
if(y > 1){
//if can move right
if(x < 7){
results.add(new Move(x+1, y-2, move));
//if can move left
if( x > 0)
results.add(new Move(x-1, y-2, move));
}
ArrayList<Move> return = new ArrayList<Move>();
for(Move : results){
if(!exposed[move.pos[0]][move.pos[1]]){
exposed[move.pos[0]][move.pos[1]] = true;
return.add(move);
}
}
return return;
}
}
//possible moves
// One BFS node: a board position plus a back-pointer along the discovered
// path, used to reconstruct the move sequence.
static class Move{
int[] pos;
Move lastMove;
Move(int x, int y, Move last){
pos = new int[]{x, y};
lastMove = last;
}
}
Done in O(n) complexity with O(n) memory:
/**
 * Returns a pair of array values summing to sumTo as {later, earlier}, or
 * null when none exists (or on null input / sumTo < 1). O(n) time, O(n)
 * space.
 * Fixes: parameter type was `in` (typo for int), and the cache was never
 * populated, so no pair could ever be found.
 */
public static int[] getSum(int[] arr, int sumTo){
if(arr == null || sumTo < 1){
return null;
}
HashSet<Integer> cache = new HashSet<Integer>();
for(int i : arr){
int otherSum = sumTo - i;
if(cache.contains(otherSum)){
return new int[]{i, otherSum};
}
cache.add(i); // remember this value for later complements
}
return null;
}
(edit: fixed logical bug)
- zortlord November 26, 2014
- zortlord December 22, 2014