010010.bin
First, a small nitpick: it's Singleton, not Singelton, but this is probably not the sort of advice you're looking for.
There is nothing in your class that forces usage of the getInstance() method to get a MySingleton instance. Usually, it is considered good practice to make the constructor private, to prevent other code from inadvertently calling the constructor and obtain another instance. If this was part of an interview question, then I'd say this is what the interviewer was looking for - after all, if you really know what the Singleton pattern is all about, you should know that the code should *forbid* creation of other instances. You do it by making the constructor private.
Classic application of recursion + backtracking.
My solution assumes that you are given a starting position in the maze, and that getting out of the maze is equivalent to reach a free position in the borders. With that in mind, all you have to do is branch on every direction (left, right, up, down), recursively finding the path until a border position is reached, or until you have tried every possibility and none worked (meaning it is impossible to get out of the maze from that starting position).
Just be careful to keep track of visited positions in a given path search, so that you don't recurse infinitely. Also, as positions are added to the path, we can keep track of the current path in a vector (C++ jargon for growable array), so that we can print it when we reach one of the border positions.
C++ implementation, tested and (apparently) working:
#include <cassert>
#include <iostream>
#include <sstream>
#include <vector>
using namespace std;
/* Recursive backtracking step of the maze search.
 * Returns true as soon as a position outside the grid is reached (we
 * escaped); returns false from walls ('X') and already-visited cells.
 * On success, `path` holds the "( x, y )" positions of the escape route;
 * on failure the position is popped and unmarked before returning. */
bool find_path_aux(const vector<vector<char> > &maze, vector<vector<bool> > &visited,
                   vector<string> &path, int curr_x, int curr_y) {
    bool outside_rows = curr_x < 0 || curr_x >= (int) maze.size();
    bool outside_cols = curr_y < 0 || curr_y >= (int) maze[0].size();
    if (outside_rows || outside_cols)
        return true; /* stepped past the border: escaped */
    if (visited[curr_x][curr_y] || maze[curr_x][curr_y] == 'X')
        return false;
    assert(maze[curr_x][curr_y] == '_');
    visited[curr_x][curr_y] = true;
    ostringstream position;
    position << "( " << curr_x << ", " << curr_y << " )";
    path.push_back(position.str());
    /* Same exploration order as the nested -1..1 loops: up, left, right, down. */
    static const int dx[] = { -1, 0, 0, 1 };
    static const int dy[] = { 0, -1, 1, 0 };
    for (int d = 0; d < 4; d++)
        if (find_path_aux(maze, visited, path, curr_x+dx[d], curr_y+dy[d]))
            return true;
    /* Dead end: undo this step. */
    path.pop_back();
    visited[curr_x][curr_y] = false;
    return false;
}
/* Entry point of the maze search: clears `path`, allocates the visited
 * matrix, and delegates to find_path_aux starting at (pos_x, pos_y).
 * Fix: an empty maze previously dereferenced maze[0] out of range; an
 * empty maze has no inside, so any position is already "out" and we
 * return true with an empty path (consistent with find_path_aux's
 * out-of-bounds-means-escaped convention). */
bool find_path(const vector<vector<char> > &maze, vector<string> &path, int pos_x, int pos_y) {
    path.clear();
    if (maze.empty() || maze[0].empty())
        return true;
    vector<vector<bool> > visited(maze.size(), vector<bool>(maze[0].size(), false));
    return find_path_aux(maze, visited, path, pos_x, pos_y);
}
/* Reads mazes from stdin and prints the escape path (or "Impossible"). */
int main(void) {
    cout << "Enter maze dimensions N and M for an NxM maze, followed by each entry separated by a blank," << endl;
    cout << "followed by the initial position" << endl;
    cout << "> ";
    unsigned rows, cols;
    while (cin >> rows >> cols) {
        vector<vector<char> > maze(rows, vector<char>(cols));
        for (unsigned r = 0; r < rows; r++)
            for (unsigned c = 0; c < cols; c++)
                cin >> maze[r][c];
        unsigned start_x, start_y;
        cin >> start_x >> start_y;
        vector<string> path;
        if (!find_path(maze, path, start_x, start_y)) {
            cout << "Impossible" << endl;
        } else {
            /* Join the path with " -> " separators. */
            for (vector<string>::size_type i = 0; i < path.size(); i++) {
                if (i > 0)
                    cout << " -> ";
                cout << path[i];
            }
            cout << endl;
        }
        cout << "> ";
    }
    return 0;
}
Typical topological sort application. Build a directed graph where a node stores the name of a package. There is an edge from node U to node V if package U depends on V. Run topological sort to find the order of packages installation that does not violate dependency constraints.
A small tweak is necessary to detect cyclic dependencies, but it's easy to do. A graph has a cycle if there are back-edges in the DFS tree; simply keep track of the nodes in the DFS stack and check each traversed edge to make sure that node is not in the current stack already (if it is, a loop was found and we can abort everything).
Full C++ working implementation. Note that it's a lot more code than expected in an interview, probably the scope of the interview was mostly about getting the core graph algorithm right (and not so much about reading and parsing input):
#include <iostream>
#include <sstream>
#include <vector>
#include <map>
#include <cassert>
#include <cctype>
using namespace std;
/* A node in the package-dependency graph: one per package name. */
struct graph_node {
    string package;                 /* package name this node represents */
    vector<graph_node*> neighbors;  /* packages this one depends on */
    bool visited;                   /* DFS bookkeeping: node fully processed */
    bool in_stack;                  /* DFS bookkeeping: node on the current DFS path */
    /* Fix: visited/in_stack were previously left uninitialized by the
       constructor; they are now false from the start, so a node is safe
       to traverse even before topological_sort() resets the flags. */
    graph_node(const string &pkg_name) : package(pkg_name), visited(false), in_stack(false) { }
};
/* Returns the graph node for package `str`, allocating it on first use.
 * Fix: the original performed three map lookups (find + two operator[]);
 * this version does a single lower_bound and a hinted insert.
 * Note: nodes are heap-allocated; the caller owns them (see the cleanup
 * loop in print_dependencies). */
static graph_node *get_graph_node(map<string, graph_node *> &graph, const string &str) {
    map<string, graph_node *>::iterator it = graph.lower_bound(str);
    if (it == graph.end() || it->first != str)
        it = graph.insert(it, make_pair(str, new graph_node(str)));
    return it->second;
}
/* Post-order DFS: appends `node` to top_sort_out only after all of its
 * dependencies have been appended, so dependencies come first in the
 * output. Returns false if a back-edge (cyclic dependency) is found. */
static bool dfs_visit(graph_node *node, vector<graph_node *> &top_sort_out) {
    if (node->in_stack)
        return false;   /* back-edge: this node is already on the DFS path */
    if (node->visited)
        return true;    /* already fully processed by an earlier visit */
    node->visited = true;
    node->in_stack = true;
    typedef vector<graph_node *>::iterator iter;
    for (iter it = node->neighbors.begin(); it != node->neighbors.end(); ++it) {
        if (!dfs_visit(*it, top_sort_out))
            return false;
    }
    node->in_stack = false;
    top_sort_out.push_back(node);
    return true;
}
/* Runs a DFS-based topological sort over every component of the graph.
 * Returns the comma-separated install order (dependencies first), or the
 * fixed cycle message if the dependencies are not a DAG. */
static string topological_sort(map<string, graph_node *> &graph) {
    typedef map<string, graph_node *>::iterator graph_iter;
    /* Reset DFS bookkeeping on every node. */
    for (graph_iter it = graph.begin(); it != graph.end(); ++it) {
        it->second->visited = false;
        it->second->in_stack = false;
    }
    vector<graph_node *> deps_order;
    /* Visit every component; abort on the first cycle found. */
    for (graph_iter it = graph.begin(); it != graph.end(); ++it) {
        if (it->second->visited)
            continue;
        if (!dfs_visit(it->second, deps_order))
            return "Dependencies contain cycles";
    }
    /* Join the order with ", " separators. */
    ostringstream res;
    for (vector<graph_node *>::size_type i = 0; i < deps_order.size(); i++) {
        if (i != 0)
            res << ", ";
        res << deps_order[i]->package;
    }
    return res.str();
}
/* Parses an entry of the form "package_name: dep1, dep2, ...".
 * On return, pkg_name holds the package name and deps holds each
 * dependency name (appended to whatever deps already contains).
 * Asserts if the entry has no ':' separator.
 * Fix: consecutive separators (e.g. "a: , ,b") previously pushed empty
 * strings into deps; empty tokens are now skipped. */
void parse_dependency(const string &entry, string &pkg_name, vector<string> &deps) {
    string::size_type pkg_name_end = entry.find(':');
    assert(pkg_name_end < entry.size());
    pkg_name = entry.substr(0, pkg_name_end);
    string::size_type begin = pkg_name_end+1;
    while (begin < entry.size()) {
        /* Skip whitespace and comma separators before the next token. */
        while (begin < entry.size() && (isspace(entry[begin]) || entry[begin] == ','))
            begin++;
        string::size_type end = begin;
        while (end < entry.size() && entry[end] != ',' && !isspace(entry[end]))
            end++;
        if (end > begin)
            deps.push_back(entry.substr(begin, end-begin));
        begin = end+1;
    }
}
void print_dependencies(const vector<string> &packages) {
map<string, graph_node *> graph;
/* Build the graph */
for (vector<string>::const_iterator pkg_it = packages.begin();
pkg_it != packages.end();
pkg_it++) {
string pkg_name;
vector<string> deps;
parse_dependency(*pkg_it, pkg_name, deps);
graph_node *node = get_graph_node(graph, pkg_name);
for (vector<string>::const_iterator dep_it = deps.begin();
dep_it != deps.end();
dep_it++)
node->neighbors.push_back(get_graph_node(graph, *dep_it));
}
/* Perform topological sort, or stop if there is a cycle */
string res = topological_sort(graph);
cout << res << endl;
}
/* Reads batches of dependency entries and prints each install order.
 * Fix: the leftover newline after the count was previously consumed with
 * getline(cin, entries[0]), which is out of bounds when entries_count is
 * 0; it is now discarded into a scratch string. */
int main(void) {
    cout << "Enter the number of entries, followed by each entry." << endl;
    cout << "An entry is of the form: package_name: dependency1, dependency2, ..." << endl;
    cout << "Example: emacs: gcc, build-essential, g++" << endl;
    cout << "> ";
    unsigned entries_count;
    while (cin >> entries_count) {
        vector<string> entries(entries_count);
        string line_remainder;
        getline(cin, line_remainder); // Discard the rest of the count line
        for (unsigned i = 0; i < entries_count; i++)
            getline(cin, entries[i]);
        print_dependencies(entries);
        cout << "> ";
    }
    return 0;
}
Wouldn't that be level 2 in your example, with 5 nodes?
- 010010.bin August 03, 2015
Iterate through both lists and insert each node of each list in a hash table or a balanced binary search tree indexed by (name, phone) key-value pairs (in C++ you can use a set<pair<string, string>>, assuming name and phone are strings). When inserting, if an element is already there, then just ignore the current node and do not insert it.
Then iterate through the set / hash table, building a new list along the way. Each node retrieved from the set is a new node in the linked list.
Time complexity: linear in the size of the lists
Space complexity: linear in the size of the lists
Other possible approaches include sorting both lists to avoid using additional space, but it comes at the cost of raising running time to O(n log(n)); OR you can use nested loops to check if each node exists in the other list or on the rest of the current list, but that would be n^2.
Interesting question. It can be solved in linear time, or, to be more precise, O(N+M) where N and M is the length of each string.
Assume the larger string has size N, and the shorter has size M. A naive approach is to iterate through each index i in the large string, compute the character frequency table of large_str[i..i+M-1], and compare it to the short string's character frequency table - if they are the same, then we report i as an index.
However, computing the character frequency table of each substring from scratch and comparing it against the short string's table is O(M), so this would be O(NM).
We can improve on this and make it O(N+M). First, compute the character frequency table for the short string. Then, compute the character frequency table for large_str[0..M-1]. As you build it for the larger string, keep track of how many unique positions were filled, and how many of those matched with positions in the short string frequency table.
Then it works as if you had a sliding window on the larger string. When you move it right one place, remove the character on the left, and add the new character on the right, updating the number of matched positions so far, along with the number of total positions.
If you implement it correctly, then at any given time you know how many positions you have matched so far and how many unique positions are set in the character frequency table. So you can test if both tables match in O(1) - they are the same if the number of matched positions on the large table is equal to the number of positions filled in the small table, and if the number of total filled positions in the large table is equal to the number of positions filled in the small table.
Implementation in C below; ran a few test cases and seems to be good. One interesting testcase is "mississippi" with "sis". It correctly finds all overlapping matches.
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <limits.h>
/*
 * Prints (to stdout, space-separated) every index of big_str at which a
 * permutation of the short string begins, using a sliding window of
 * length short_len and incremental frequency-table bookkeeping.
 *
 * Parameters:
 *   big_str    - the longer string; must have length >= short_len
 *   short_str  - unused here; only its frequency table is needed
 *   short_len  - length of the short string (the window size)
 *   short_freq - per-character counts of the short string
 *   nfreq      - number of distinct characters in the short string
 *
 * Invariants maintained across the loop:
 *   total_big - number of distinct characters in the current window
 *   matched   - number of characters whose window count equals the
 *               short string's count (zero counts never count as matched)
 * The window is an anagram iff matched == nfreq and total_big == nfreq.
 *
 * NOTE: big_freq is a static buffer, so this function is not reentrant
 * and not thread-safe.
 */
void match_permutations_aux(const char *big_str, const char *short_str, size_t short_len,
const size_t short_freq[UCHAR_MAX+1], size_t nfreq) {
static size_t big_freq[UCHAR_MAX+1];
memset(big_freq, 0, sizeof(big_freq));
size_t total_big = 0;
size_t matched = 0;
size_t i;
/* Build the frequency table for the first window, big_str[0..short_len-1]. */
for (i = 0; i < short_len; i++) {
unsigned char idx = big_str[i];
/* If this character's count matched before the increment, the increment
   will unmatch it; decrement matched first. */
if (big_freq[idx] == short_freq[idx] && big_freq[idx] != 0)
matched--;
if (big_freq[idx] == 0)
total_big++;
big_freq[idx]++;
if (big_freq[idx] == short_freq[idx])
matched++;
}
/* First window is an anagram iff all counts match with no extra chars. */
if (matched == nfreq && total_big == nfreq)
printf("0 ");
/* Slide the window one character at a time until the end of big_str. */
for (i = short_len; big_str[i] != '\0'; i++) {
unsigned char idx_remove = big_str[i-short_len];
unsigned char idx_add = big_str[i];
assert(big_freq[idx_remove] > 0);
/* Remove leftmost character */
if (big_freq[idx_remove] == short_freq[idx_remove])
matched--;
big_freq[idx_remove]--;
if (big_freq[idx_remove] == short_freq[idx_remove] && big_freq[idx_remove] != 0)
matched++;
if (big_freq[idx_remove] == 0)
total_big--;
/* Add rightmost character */
if (big_freq[idx_add] == 0)
total_big++;
if (big_freq[idx_add] == short_freq[idx_add] && big_freq[idx_add] != 0)
matched--;
big_freq[idx_add]++;
if (big_freq[idx_add] == short_freq[idx_add])
matched++;
/* Window now starts at i-short_len+1; report it if it's an anagram. */
if (matched == nfreq && total_big == nfreq)
printf("%zu ", i-short_len+1);
}
}
/* Decides which input string is the longer one, builds the frequency
 * table of the shorter one, and delegates the sliding-window search to
 * match_permutations_aux (which prints the match indices); a trailing
 * newline is printed afterwards. NULL inputs are ignored.
 * Fix: strlen() was previously called up to three times on the same
 * buffers; each length is now computed exactly once.
 * NOTE: short_freq is a static buffer, so this is not reentrant. */
void match_permutations(const char *str_a, const char *str_b) {
    if (str_a == NULL || str_b == NULL)
        return;
    size_t len_a = strlen(str_a);
    size_t len_b = strlen(str_b);
    const char *big_str = str_a;
    const char *short_str = str_b;
    size_t short_len = len_b;
    if (len_b > len_a) {
        big_str = str_b;
        short_str = str_a;
        short_len = len_a;
    }
    static size_t short_freq[UCHAR_MAX+1];
    memset(short_freq, 0, sizeof(short_freq));
    size_t total_short = 0;   /* distinct characters in the short string */
    size_t i;
    for (i = 0; i < short_len; i++) {
        if (short_freq[(unsigned char) short_str[i]] == 0)
            total_short++;
        short_freq[(unsigned char) short_str[i]]++;
    }
    match_permutations_aux(big_str, short_str, short_len, short_freq, total_short);
    putchar('\n');
}
static char str_a_buff[1024];
static char str_b_buff[1024];
/* Reads string pairs and prints the permutation-match indices for each.
 * Fix: the scanf conversions now carry a field width (%1023s) so input
 * longer than the buffers can no longer overflow them. */
int main(void) {
    printf("Enter string A and B.\n");
    printf("> ");
    while (scanf("%1023s%1023s", str_a_buff, str_b_buff) == 2) {
        match_permutations(str_a_buff, str_b_buff);
        printf("> ");
    }
    return 0;
}
What do you mean, "minimum triangle perimeter"? Can you clarify?
- 010010.bin August 01, 2015This is known as the Monty Hall problem. Read about it in wikipedia.
- 010010.bin July 31, 2015This problem may seem intimidating at first, but if you look at the tree as a special case of a graph, it's not that hard.
Perform a BFS traversal of the tree, but print the children count of a node before printing the children themselves. So, serializing is just doing a BFS, attaching the number of children before actually printing the children.
Example for a tree of integers (applies similarly to a tree of strings). If you have this tree:
1 5 2 3 4 5 6 3 7 8 9 1 10 2 11 12 0 3 13 14 15 0 0 0 0 0 0 0 0 0
This is the serialized version of a tree with the following properties:
The root is 1. 1 has 5 children; they are 2, 3, 4, 5 and 6.
2 has 3 children; they are 7, 8, 9
3 has 1 child; it is 10
4 has 2 children: 11 and 12
5 has no children
6 has 3 children: 13, 14, 15
7 has no children
8 has no children
9 has no children
10 has no children
11 has no children
12 has no children
13 has no children
14 has no children
15 has no children
Deserializing is easy. You use another queue. Initialize the queue with the first node read. Then until the queue becomes empty, dequeue a node, read the children count from input, and then read that same amount of nodes and enqueue them as you read them.
Working C++ implementation:
#include <iostream>
#include <vector>
#include <queue>
#include <sstream>
using namespace std;
/* A node of an N-ary tree: an integer payload plus any number of children
 * (owned by the parent; see destroy_tree). */
struct tree_node {
    int val;
    vector<tree_node *> children;
    /* explicit prevents accidental int -> tree_node implicit conversions;
       all existing call sites construct nodes explicitly with new. */
    explicit tree_node(int v) : val(v) { }
};
/* BFS serialization: the root's value first, then for each dequeued node
 * its child count followed by the child values. An empty tree -> "". */
string serialize(tree_node *root) {
    if (root == NULL)
        return "";
    ostringstream out;
    queue<tree_node *> pending;   /* renamed: no longer shadows the type */
    out << root->val;
    pending.push(root);
    while (!pending.empty()) {
        tree_node *node = pending.front();
        pending.pop();
        out << " " << node->children.size();
        for (vector<tree_node *>::size_type i = 0; i < node->children.size(); i++) {
            out << " " << node->children[i]->val;
            pending.push(node->children[i]);
        }
    }
    return out.str();
}
/* Inverse of serialize(): rebuilds the tree from the BFS encoding.
 * Returns NULL for the empty encoding; otherwise the caller owns the
 * returned tree (free it with destroy_tree). */
tree_node *deserialize(const string &tree_str) {
    if (tree_str == "")
        return NULL;
    istringstream in(tree_str);
    int value;
    in >> value;
    tree_node *root = new tree_node(value);
    queue<tree_node *> pending;
    pending.push(root);
    while (!pending.empty()) {
        tree_node *node = pending.front();
        pending.pop();
        int child_count;
        in >> child_count;
        while (child_count-- > 0) {
            in >> value;
            tree_node *child = new tree_node(value);
            node->children.push_back(child);
            pending.push(child);
        }
    }
    return root;
}
/* Recursively frees every node of the tree rooted at `root`. */
static void destroy_tree(tree_node *root) {
    if (root == NULL)
        return;
    for (vector<tree_node *>::size_type i = 0; i < root->children.size(); i++)
        destroy_tree(root->children[i]);
    delete root;
}
int main(void) {
cout << "Enter an N-ary tree as described in the comments to deserialize and serialize it again" << endl;
cout << "> ";
string input;
while (getline(cin, input)) {
cout << serialize(deserialize(input)) << endl;
cout << "> ";
}
return 0;
}
Bubble sort? Seriously?
You are right: merge sort is usually a good way to sort a linked list. I wouldn't work for this company.
If they explicitly say it's a large file, then you probably can't keep all the data in in-memory data structures like hash tables or binary search trees.
In any case, grouping together all the anagrams can be achieved by sorting each string while keeping a copy of the original, and then sort all the words based on their sorted version.
So, start by making a first pass over the file. The result of this step is to create a second file of key value pairs, where each pair K, V consists of a sorted word followed by the original word.
For example, if the file had the words "My dog is god", the first pass would create a file with these contents:
My My
dgo dog
is
dgo god
Then, sort the auxiliary file that was just created. Since we're dealing with very large datasets, you might have to use an external sort algorithm. Merge sort is a good candidate here -- probably the interviewer would test your knowledge of how merge sort works externally to sort a big amount of data.
After sorting the file, you have all anagrams close to each other:
dgo dog
dgo god
is is
My My
Or it might just be the opposite. He may be interested in testing if you know syscalls and how find works under the hood.
- 010010.bin July 17, 2015What exactly do you mean by "identify"? The rank of the bit? There's no general solution for an N-bit integer, but for reasonably small values you can make a lookup table to get O(1).
- 010010.bin July 11, 2015
Naive recursion will be O(2^max(N, M)). With dynamic programming you can make it O(N*M), which is a *HUGE* improvement.
Going from recursive approach to DP is not very hard if you think about the recursion tree.
C++ implementation with naive recursion and DP approach:
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
/* Naive exponential recursion: counts the monotone paths in an m x n
 * grid via the Pascal recurrence f(m,n) = f(m-1,n) + f(m,n-1),
 * f(0,n) = f(m,0) = 1. Kept for reference; the DP version is O(m*n). */
unsigned scheduler_possibilities(unsigned m, unsigned n) {
    if (m == 0 || n == 0)
        return 1;
    unsigned drop_m = scheduler_possibilities(m-1, n);
    unsigned drop_n = scheduler_possibilities(m, n-1);
    return drop_m + drop_n;
}
/* Dynamic-programming version of scheduler_possibilities: O(m*n) time.
 * dp[i][j] = dp[i-1][j] + dp[i][j-1], with the first row/column = 1.
 * Fix: the table was previously a square of side max(m, n)+1, wasting
 * memory and fill work whenever m != n; it is now exactly (m+1) x (n+1). */
unsigned scheduler_possibilities_dp(unsigned m, unsigned n) {
    vector<vector<unsigned> > dp(m+1, vector<unsigned>(n+1));
    for (unsigned i = 0; i <= m; i++)
        dp[i][0] = 1;
    for (unsigned j = 0; j <= n; j++)
        dp[0][j] = 1;
    for (unsigned i = 1; i <= m; i++)
        for (unsigned j = 1; j <= n; j++)
            dp[i][j] = dp[i-1][j] + dp[i][j-1];
    return dp[m][n];
}
/* Reads (M, N) pairs and prints the DP answer for each. */
int main(void) {
    cout << "Enter M and N" << endl;
    cout << "> ";
    unsigned m;
    unsigned n;
    while (cin >> m >> n) {
        unsigned ways = scheduler_possibilities_dp(m, n);
        //cout << scheduler_possibilities(m, n) << endl;
        cout << ways << endl << "> ";
    }
    return 0;
}
Generating the powerset of a set of N elements cannot be any better than O(2^N), so there's not much you can do. Any algorithm will stop working with relatively small values for N.
- 010010.bin July 11, 2015Negative numbers are never powers of 2. Would be better as n > 0 && (n & (n-1)) == 0
- 010010.bin July 10, 2015It depends on the implementation. There are some guarantees, but the implementation is not forced to use a mutex.
The C standard I/O library (stdio) documentation states that each call to printf(), scanf(), getchar(), putchar(), and any other stdio functions behave as if a call to flockfile(3) was made before invoking the specified function and a call to funlockfile(3) was made after invocation.
So, stdio is thread safe, as long as the threads are well behaved and use the accepted, known interface to deal with file streams - the stdio library.
The same guarantees apply to C++, but note that things like
cout << "Hello" << " World";
is actually 2 calls, so this is not atomic.
Note, however, that stdio and any I/O library uses the read(2) and write(2) syscalls under the hood, which are not atomic. If a misbehaving process neglects to use stdio and starts reading and writing with read(2) and write(2), then things can start to break even if everyone else uses stdio to manipulate the file.
So, in short, as long as you use the library, you are mostly safe, but an important point to mention to the interviewer is that the exact implementation details of how stdio manages locking are very platform specific and don't necessarily rely on a mutex.
I believe the idea is to do it in the same matrix. Also this won't scale, the running time is too bad.
- 010010.bin July 05, 2015Sort the matrix first as if it were a regular array. That is, for an MxN matrix treat it as a sequential, linear array of M*N elements and sort it using a known sorting algorithm.
Then, swap the first row with the last, the second row with the second to last, the third row with the third to last, etc.
This is O(MN log(MN)) as it is bounded by the initial sorting. No additional memory is used so space complexity is O(1). I believe you can't achieve better than these bounds.
#include <stdio.h>
/* Exchanges arr[i] and arr[j]. */
static void swap(int arr[], size_t i, size_t j) {
    int old_i = arr[i];
    arr[i] = arr[j];
    arr[j] = old_i;
}
/* Exchanges the first sz elements of arr1 and arr2, element by element. */
static void swap_arrays(int arr1[], int arr2[], size_t sz) {
    size_t k;
    for (k = 0; k < sz; k++) {
        int old_val = arr1[k];
        arr1[k] = arr2[k];
        arr2[k] = old_val;
    }
}
/* In-place quicksort using Lomuto partitioning with the middle element
 * as pivot. Swaps are inlined rather than calling the swap() helper;
 * the sequence of exchanges is identical. */
static void quicksort(int arr[], size_t sz) {
    if (sz <= 1)
        return;
    /* Move the pivot (middle element) to the front. */
    int tmp = arr[0];
    arr[0] = arr[sz/2];
    arr[sz/2] = tmp;
    size_t store = 0;   /* last index of the <= pivot region */
    size_t scan;
    for (scan = 1; scan < sz; scan++) {
        if (arr[scan] <= arr[0]) {
            store++;
            tmp = arr[store];
            arr[store] = arr[scan];
            arr[scan] = tmp;
        }
    }
    /* Put the pivot into its final position. */
    tmp = arr[0];
    arr[0] = arr[store];
    arr[store] = tmp;
    quicksort(arr, store);
    quicksort(arr+store+1, sz-(store+1));
}
/* Sorts the matrix ascending in row-major order (treating it as one flat
 * array), then reverses the order of the rows so the smallest values end
 * up in the bottom row. */
void matrix_mostly_sorted(size_t rows, size_t cols, int matrix[rows][cols]) {
    quicksort(&matrix[0][0], rows*cols);
    size_t top, bottom;
    for (top = 0, bottom = rows-1; top < rows/2; top++, bottom--) {
        swap_arrays(&matrix[top][0], &matrix[bottom][0], cols);
    }
}
/* Prints the matrix to stdout: tab-separated values, one row per line. */
static void print_matrix(size_t rows, size_t cols, int matrix[rows][cols]) {
    size_t r;
    for (r = 0; r < rows; r++) {
        size_t c;
        for (c = 0; c < cols; c++)
            printf("%d\t", matrix[r][c]);
        putchar('\n');
    }
}
/* Reads matrices, "mostly sorts" them, and prints the result.
 * Fix: the per-element scanf return values were previously ignored, so
 * malformed input silently left garbage in the matrix; reading now stops
 * cleanly on the first failed conversion. */
int main(void) {
    printf("Enter matrix dimensions followed by the elements\n");
    printf("> ");
    size_t rows, cols;
    while (scanf("%zu%zu", &rows, &cols) == 2) {
        int matrix[rows][cols];
        size_t i, j;
        int ok = 1;
        for (i = 0; i < rows && ok; i++) {
            for (j = 0; j < cols && ok; j++) {
                if (scanf("%d", &matrix[i][j]) != 1)
                    ok = 0;
            }
        }
        if (!ok)
            break;
        matrix_mostly_sorted(rows, cols, matrix);
        print_matrix(rows, cols, matrix);
        printf("> ");
    }
    return 0;
}
deb and beed are not anagrams, so they belong to separate sets. Two strings are an anagram if they have the same count of each character. beed has one more 'e' than deb so they are not anagrams.
Also you forgot few. So there you go - 5 sets of anagrams.
A naive approach to this problem would be to have an array of stacks and update the necessary
stacks on each folding operation. The problem is that each fold requires us to reverse half of
the stacks at each step, or, to put it another way, pop everything from half of the stacks
and push it again on the other half.
When processing the i-th fold, each stack has 2^i elements, and we would have to pop
2^i elements out of 2^N/2^(i+1) stacks and push them back into the other 2^N/2^(i+1) stacks.
So for each step we pop half of the total elements and push them again.
For an array of size M, this amounts to O(M log(M)), since there are log(M) steps, and each step
does O(M/2 + M/2) work (pop + push).
This code goes a little further and optimizes the stack merging step by doing it in O(1). To do
so, it has each stack element point to two elements - the node below and the node underneath.
These pointers are not always accurate: because we don't actually reverse the stacks when
merging, traversing a stack from top to bottom is not as easy as following pointers to the
element that comes next, since a merge operation effectively turns a stack upside down
(so the element below is now the element above, and vice versa). We just have to be careful
with this when traversing the stack by following the link that does NOT take us to the previous
node we just came from.
Also, each stack base (the bottommost node, that never changes) in the array keeps a pointer to
the current top of the stack whose bottom is that node. This ensures that we can find the top
of a stack in O(1) given any node in the array, and merge them in O(1).
This optimization reduces the runtime down to O(M) at the cost of a little memory overhead to
store the pointers.
Note that a naive complexity analysis would argue that the runtime is still O(M log(M)), because
at each step we do at most M/2 merges and there are log(M) steps. This is wrong though: O(1)
merges amortize the total runtime to O(M/2) because the stack sizes double on each step.
So, step 1 does M/2 merges, step 2 does M/4, step 3 does M/8:
M/2 + M/4 + M/8 + M/16 + ...
Which is bounded by 2M and thus the algorithm is O(M).
Note, however, that M = 2^N, but we need to at least represent the paper elements to move them
around, so this is probably the best asymptotic complexity we can get.
/* One cell of the folded paper strip. Cells double as stack nodes:
 * - value: the cell's label (1-based position in the original strip)
 * - up/down: links to the adjacent nodes within a stack; a merge may
 *   flip a stack, so which link points "toward the top" can vary
 * - top: kept current only on a stack's base node in the array; points
 *   at that stack's topmost node so merges can be done in O(1) */
struct array_node {
int value;
struct array_node *up;
struct array_node *down;
struct array_node *top;
};
/* Links two stack tops so that `above`'s stack sits on top of `below`'s,
 * in O(1) and without traversing either stack. Each node stores the link
 * in whichever of its up/down slots is still free (asserting that one
 * is); because the stacks are not physically reversed, a merge can flip
 * a stack's orientation — the traversal in fold() compensates by always
 * following the link that does not lead back to the previous node. */
static void merge_stacks(struct array_node *below, struct array_node *above) {
if (below->up == NULL) {
below->up = above;
} else {
assert(below->down == NULL);
below->down = above;
}
if (above->up == NULL) {
above->up = below;
} else {
assert(above->down == NULL);
above->down = below;
}
}
/* Simulates cmds_sz folds of a strip of 2^cmds_sz cells ('L' merges the
 * left half onto the right, 'R' merges the right half onto the left),
 * then prints the final stack from top to bottom.
 * arr holds one node per cell (pre-initialized by the caller); cmds
 * holds the fold directions. Aborts via assert on any other command. */
void fold(struct array_node arr[], const char cmds[], size_t cmds_sz) {
/* beg: index of the leftmost still-active stack base;
   arr_sz: number of active stacks; len: total cells, for the print. */
size_t beg = 0;
size_t arr_sz = 1 << cmds_sz;
size_t len = arr_sz;
size_t i;
for (i = 0; i < cmds_sz; i++) {
size_t j, k;
/* Pair the active stacks outermost-inward: j from the left edge,
   k from the right edge, until the middle is reached. */
for (j = beg, k = beg+arr_sz-1; j < beg+arr_sz/2; j++, k--) {
if (cmds[i] == 'L') {
/* Left fold: stack j lands (flipped) on top of stack k, so
   j's base node becomes the merged stack's new top. */
merge_stacks(arr[k].top, arr[j].top);
arr[k].top = &arr[j];
} else if (cmds[i] == 'R') {
/* Right fold: symmetric — stack k lands on top of stack j. */
merge_stacks(arr[j].top, arr[k].top);
arr[j].top = &arr[k];
} else {
assert(0);
}
}
arr_sz /= 2;
/* After a left fold the surviving stack bases are the right half. */
if (cmds[i] == 'L') {
beg += arr_sz;
}
}
assert(arr_sz == 1);
/* Walk the single remaining stack from its top. Since merges may have
   flipped orientations, follow whichever link does not lead back to
   the node we just came from. */
struct array_node *curr = arr[beg].top;
struct array_node *prev = NULL;
struct array_node *next;
for (i = 0; i < len; i++) {
printf("%d ", curr->value);
if (curr->up == NULL || curr->up == prev) {
next = curr->down;
} else {
next = curr->up;
}
prev = curr;
curr = next;
}
printf("\n");
}
/* Reads N and a sequence of N fold commands ('L'/'R'), then prints the
 * resulting cell order. The strip has 2^N cells labeled 1..2^N.
 * NOTE(review): the VLAs are sized directly from user input with no
 * upper bound — large N will overflow the stack; confirm acceptable for
 * this demo driver. */
int main(void) {
printf("Enter N followed by the sequence of commands.\n");
printf("> ");
size_t n;
while (scanf("%zu", &n) == 1) {
/* One node per cell; each cell starts as its own one-element stack. */
struct array_node values[1 << n];
size_t i;
for (i = 0; i < (1 << n); i++) {
values[i].value = i+1;
values[i].top = &values[i];
values[i].up = values[i].down = NULL;
}
char cmds[n];
for (i = 0; i < n; i++) {
scanf(" %c", &cmds[i]);
}
fold(values, cmds, n);
printf("> ");
}
return 0;
}
Some tests:
Enter N followed by the sequence of commands.
> 3
L R L
5 4 1 8 7 2 3 6
> 3
L L L
7 2 3 6 5 4 1 8
> 3
R R R
2 7 6 3 4 5 8 1
> 3
R L R
4 5 8 1 2 7 6 3
>
Seems to be good.
- 010010.bin June 30, 2015This is easier said than done. How do you get an index to the other array out of the hash code? This requires a very specific and contextualized hash function. How do you choose the hash function? How do you even find it efficiently?
- 010010.bin June 25, 2015Compilation warnings aside, your getPoint() function is wrong, and consequently the whole algorithm doesn't work.
Counter-example:
Find 6 in [ 6, 20, 15, 12, 11, 10, 9 ].
Hint: getPoint() will mistakenly recurse on array[4..6] looking for the maximum.
And how would you find the largest index-increasing subsequence?
- 010010.bin June 19, 2015Can be solved in linear time.
Partition the array into a negative side and a positive side. The switch point can be found in O(n) by iterating from right to left until a negative value is found (or until we fall off the left end). It could even be found in O(log(n)) with a modified binary search, but it's not worth the hassle, since the rest of the algorithm is O(n). Let idx_negative be the index of the rightmost negative integer, or -1 if there are no negatives.
Now, perform a conceptual merge sort where one array is the subarray A[idx_negative+1..n-1] and the other array is the array that you'd get if you travel right to left on the negative side, taking the absolute value of each element along the way.
In other words, the problem reduces to a merge step of mergesort if you look at your array as 2 independent sorted arrays (where the negative array is traversed right to left after getting the absolute value of each element). To avoid counting the same number twice, keep track of the previous number that would have been inserted in the output array of mergesort - if the current number is equal, don't count it, otherwise, count it.
Implementation in C:
#include <stdio.h>
#include <assert.h>
/* Merge step over a sorted array split at the negative/non-negative
 * boundary: idx_negative walks left through the negatives (absolute
 * values ascending) and idx_positive walks right through the rest.
 * `last` is the previously consumed absolute value, used to skip
 * duplicates. Returns how many NEW distinct absolute values appear. */
unsigned abs_distinct_merge(int arr[], size_t arr_sz, int idx_positive, int idx_negative, int last) {
    unsigned count = 0;
    /* Standard two-way merge while both sides have elements left. */
    while (idx_negative >= 0 && (size_t) idx_positive < arr_sz) {
        int candidate;
        if (-arr[idx_negative] < arr[idx_positive])
            candidate = -arr[idx_negative--];
        else
            candidate = arr[idx_positive++];
        if (candidate != last)
            count++;
        last = candidate;
    }
    /* Drain whichever side is left over. */
    while (idx_negative >= 0) {
        if (-arr[idx_negative] != last)
            count++;
        last = -arr[idx_negative--];
    }
    while ((size_t) idx_positive < arr_sz) {
        if (arr[idx_positive] != last)
            count++;
        last = arr[idx_positive++];
    }
    return count;
}
/* Counts the distinct absolute values in a sorted array in O(n), by
 * treating the negatives (scanned right-to-left) and the rest (scanned
 * left-to-right) as two sorted sequences and merging them.
 * Fix: an empty array previously fell through every branch and hit
 * assert(0); it now returns 0. */
unsigned abs_distinct(int arr[], int arr_sz) {
    if (arr_sz <= 0)
        return 0;
    /* Find the rightmost non-positive element (or -1 if none). */
    int right_neg;
    for (right_neg = arr_sz-1; right_neg >= 0 && arr[right_neg] > 0; right_neg--)
        ; /* Intentionally left blank */
    int idx_positive = right_neg+1;
    int idx_negative = right_neg;
    int last;
    /* Consume the smallest absolute value to seed `last`. */
    if (idx_negative >= 0 && idx_positive < arr_sz) {
        if (-arr[idx_negative] < arr[idx_positive]) {
            last = -arr[idx_negative];
            idx_negative--;
        } else {
            last = arr[idx_positive];
            idx_positive++;
        }
    } else if (idx_negative >= 0) {
        last = -arr[idx_negative];
        idx_negative--;
    } else {
        last = arr[idx_positive];
        idx_positive++;
    }
    /* +1 accounts for the seed element consumed above. */
    return abs_distinct_merge(arr, arr_sz, idx_positive, idx_negative, last)+1;
}
static int array_buf[1024];
/* Reads sorted arrays and reports their absolute-distinct counts. */
int main(void) {
    printf("Enter array size, followed by the elements. Array must be sorted.\n");
    printf("> ");
    size_t array_sz;
    while (scanf("%zu", &array_sz) == 1) {
        assert(array_sz <= sizeof(array_buf)/sizeof(array_buf[0]));
        size_t i;
        for (i = 0; i < array_sz; i++)
            scanf("%d", &array_buf[i]);
        unsigned distcount = abs_distinct(array_buf, array_sz);
        printf("Absolute distinct count = %u\n", distcount);
        printf("> ");
    }
    return 0;
}
The problem can be solved in O(n).
Basically, at each index K you need to know if the largest element in A[0..K-1] is less than or equal to A[K], and if the smallest element in A[K+1..N-1] is greater than or equal to A[K].
Build two auxiliary arrays, mins and maxs, such that mins[i] stores the smallest element in array[i+1..N-1], and maxs[i] stores the largest element in array[0..i-1]. The auxiliary arrays can be built in O(N).
After building the auxiliary arrays, iterate through the array once more, looking for a position where maxs[K] <= array[K] and mins[K] >= array[K]. If found, return it; otherwise, if the loop ends, no such K exists.
Working implementation:
#include <iostream>
#include <limits>
#include <vector>
using namespace std;
/* Finds a "magnitude pole": an index K such that every element before it
 * is <= arr[K] and every element after it is >= arr[K]. Returns an
 * iterator to the first such position, or arr.end() if none exists.
 * O(n) time and O(n) extra space via prefix-max / suffix-min tables. */
vector<int>::const_iterator magnitude_pole(const vector<int> &arr) {
    const vector<int>::size_type n = arr.size();
    if (n == 0)
        return arr.end();
    /* prefix_max[i]: largest value in arr[0..i-1] (INT_MIN identity at 0).
       suffix_min[i]: smallest value in arr[i+1..n-1] (INT_MAX at n-1). */
    vector<int> prefix_max(n);
    vector<int> suffix_min(n);
    prefix_max[0] = numeric_limits<int>::min();
    suffix_min[n-1] = numeric_limits<int>::max();
    for (vector<int>::size_type i = 1; i < n; i++) {
        prefix_max[i] = max(prefix_max[i-1], arr[i-1]);
        suffix_min[n-1-i] = min(suffix_min[n-i], arr[n-i]);
    }
    for (vector<int>::size_type i = 0; i < n; i++)
        if (prefix_max[i] <= arr[i] && arr[i] <= suffix_min[i])
            return arr.begin()+i;
    return arr.end();
}
/* Reads arrays and reports the index of a magnitude pole, if any. */
int main(void) {
    cout << "Enter number of elements in array followed by each element" << endl;
    cout << "> ";
    vector<int>::size_type elems;
    while (cin >> elems) {
        vector<int> values(elems);
        for (vector<int>::size_type i = 0; i < values.size(); i++)
            cin >> values[i];
        vector<int>::const_iterator pole = magnitude_pole(values);
        if (pole != values.end())
            cout << "Magnitude pole found at index " << pole-values.begin() << endl;
        else
            cout << "No magnitude pole found." << endl;
        cout << "> ";
    }
    return 0;
}
Won't work. Counter-example: 1, 2, 9, 9, 2. Can be sorted by swapping the first 9 with the second 2; the code throws an exception.
- 010010.bin June 10, 2015Doesn't work. This code is checking whether the array has at most one element out of place.
Consider this array:
1, 2, 3, 9, 6, 7, 4, 12, 13
The code will return false, but the array can be made sorted by swapping 9 with 4, so it should return true.
Won't work.
Take this heap as an example:
15, 12, 10, 8, 9, 5, 6, 6, 7, 8, 5
For k = 7 (counting from 1), the k-th max is in level 3 (counting from 0), and your algorithm would assume it's in level 2 because levels 0-2 contain 7 elements.
It can be found in linear time using a suffix tree. The algorithm is described in Algorithms on Strings, Trees and Sequences, section 7.4.
Suppose the strings to match are str1 and str2. The algorithm works as follows: first, build the generalized suffix tree for both strings. If you're not familiar with the concept, it's basically a suffix tree for both str1 and str2, that is, the same tree stores both the suffixes of str1 and str2. The only difference when compared with a traditional suffix tree is that the leafs in a generalized suffix tree can have different terminators (there is one terminator for each input string). So, for example, if str1 has size M and str2 has size N, we will have M+1 leafs with the terminator for str1 - let it be $1 - and N+1 leafs with the terminator for str2 - let it be $2.
For this problem, the generalized suffix tree traditional algorithm is modified so as to keep track of which terminators are in a given subtree. An internal node is marked with a 1 if there is at least a leaf below that contains the terminator $1; we do the same thing for str2 and $2.
The generalized suffix tree can be built in linear time using Ukkonen's algorithm.
Now, with the generalized suffix tree, the problem is reduced to finding the internal node with the largest string-depth that was marked with both a 1 and a 2, since internal nodes marked twice represent common substrings. So, just traverse the tree and visit every node to find the deepest one that meets such criteria. Once found, the longest common substring corresponds to the string path of that node.
It is considerably easier to understand and explain when in a whiteboard -- this is very well suited for an in-person interview.
I do believe that the point of the question is to have you explain the algorithm and show that you understand suffix trees and their construction, but I think that implementing it in the timespan of an interview is extremely hard, if not impossible. Suffix tree construction is a hard topic and coming up with bug-free code is not trivial.
Is the question about printing an unsorted list in sorted order (ascending and descending), or is it about printing a list left-to-right and right-to-left?
If it's the former, it's pretty much impossible unless you are given some specific information about the data that the list holds, since sorting in general is not achievable in O(n).
The solution to this problem is outlined by Jon Bentley in Programming Pearls, Column 2. The idea is that the amount of characters to rotate acts as if your string was divided into two blocks, A and B, such that the original string is AB and you wish to turn it into BA.
The elegant way to do it is with reverse(reverse(A) + reverse(B)). This is time- and space-efficient.
Out of curiosity, here's a small interesting excerpt of that Column: Brian Kernighan and P. J. Plauger used precisely this code in their 1981 Software Tools in Pascal to move lines within a text editor. Kernighan reports that it ran correctly the first time it was executed, while their previous code for a similar task based on linked lists contained several bugs.
In place, O(n) solution, ready with a main() to run your own tests:
#include <stdio.h>
#include <string.h>
/*
 * Reverses, in place, the characters of str in the half-open range
 * [begin, end).  `begin` is inclusive and `end` is exclusive, so a
 * caller can pass plain [offset, offset + length) ranges directly.
 */
void reverse(char *str, size_t begin, size_t end) {
    /* Walk both ends of the range toward the middle, swapping pairs. */
    while (begin < end) {
        char left = str[begin];
        str[begin] = str[end - 1];
        str[end - 1] = left;
        begin++;
        end--;
    }
}
/*
 * Rotates the NUL-terminated string `str` left by `amt` positions, in
 * place, using the classic three-reversal trick: if str = AB where A is
 * the first `amt` characters, then reversing A, reversing B, and finally
 * reversing the whole buffer yields BA.
 *
 * `amt` may exceed the string length; it is reduced modulo the length.
 * An empty string is returned unchanged -- this guard also prevents the
 * modulo reduction from dividing by zero.
 */
void left_rotate(char *str, size_t amt) {
    size_t str_sz = strlen(str);
    if (str_sz == 0)
        return; /* nothing to rotate; also avoids amt % 0 (UB) */
    amt %= str_sz;
    reverse(str, 0, amt);
    reverse(str, amt, str_sz);
    reverse(str, 0, str_sz);
}
/* Fixed buffer for the user-supplied string (file-scope, zero-initialized). */
static char input_buf[512];

/*
 * Interactive driver: repeatedly reads a string and a rotation amount,
 * rotates the string in place and echoes the result.  Terminates at EOF
 * or on malformed input.
 */
int main(void) {
    printf("Enter a string and a value to rotate it left.\n");
    printf("> ");
    /* Width limit (%511s) prevents overflowing input_buf on long input. */
    while (scanf("%511s", input_buf) == 1) {
        size_t amt;
        /* Check the second read too, so we never rotate by an
         * uninitialized amount when input ends mid-pair. */
        if (scanf("%zu", &amt) != 1)
            break;
        left_rotate(input_buf, amt);
        printf("%s\n", input_buf);
        printf("> ");
    }
    return 0;
}
Hi, glad you found the code useful. I haven't touched Java for a while, so I'm afraid I can't help you much on that. I'm not comfortable with it and it would take me a while, I guess. But if you understand the algorithm, and if you know Java, it should be easy for you to convert it. Just make sure to understand how powerset_aux() works and you're good. About the memory error: well, the algorithm is O(2^N), and it can't possibly get any better than that, so you will always be out of memory pretty soon. An iterative approach may trade memory usage for runtime, but then your program just won't terminate anytime soon. There's nothing you can do about that.
- 010010.bin May 25, 2015Sort the array. Then, code the usual recursive powerset implementation, but with some changes: each call will first recursively compute the powerset without the current element. Then, iterate from the current index and keep incrementing until a different element is found. Now, recurse with 1 element, 2 equal elements, 3 equal elements, etc., but passing the index of the first different element to the next call.
Here's a working implementation in C++:
#include <iostream>
#include <algorithm>
#include <vector>
using namespace std;
/*
 * Recursively prints every subset formed from elems[elems_i..end)
 * appended to the partial subset currently held in out_buff.
 *
 * `elems` must be sorted so that equal values are adjacent: duplicates
 * are handled by deciding, for each run of equal values, how many copies
 * (0, 1, 2, ..., run length) to include, which guarantees no subset is
 * printed twice.  out_buff is restored to its original contents before
 * returning (backtracking).
 */
static void powerset_aux(const vector<int> &elems, vector<int>::size_type elems_i, vector<int> &out_buff) {
/* Base case: no elements left to decide on -- print the subset built so far. */
if (elems_i >= elems.size()) {
cout << "{ ";
if (out_buff.size() > 0) {
cout << out_buff[0];
}
for (vector<int>::size_type i = 1; i < out_buff.size(); i++) {
cout << ", " << out_buff[i];
}
cout << " }" << endl;
return;
}
/* Find the first index whose value differs from elems[elems_i]; the run
 * [elems_i, next_different) holds equal values because the input is sorted. */
vector<int>::size_type next_different;
for (next_different = elems_i+1;
next_different < elems.size() && elems[next_different] == elems[elems_i];
next_different++)
; /* Intentionally left blank */
/* Choice 0: include no copy of the current value at all. */
powerset_aux(elems, next_different, out_buff);
/* Choices 1..k: include 1, then 2, ... then k copies of the current
 * value, where k is the length of the run of equal elements. */
for (vector<int>::size_type next_eq = elems_i; next_eq != next_different; next_eq++) {
out_buff.push_back(elems[next_eq]);
powerset_aux(elems, next_different, out_buff);
}
/* Undo every push above so the caller sees out_buff unchanged. */
for (vector<int>::size_type i = elems_i; i != next_different; i++) {
out_buff.pop_back();
}
}
/*
 * Prints the power set of `elems` to stdout, one subset per line.
 * The vector is sorted first so that duplicate values end up adjacent,
 * which lets the recursive helper avoid emitting any subset twice.
 * Note: the argument is reordered (sorted) as a side effect.
 */
void powerset(vector<int> &elems) {
    sort(elems.begin(), elems.end());
    vector<int> scratch;
    powerset_aux(elems, 0, scratch);
}
/*
 * Interactive driver: reads an element count followed by that many
 * integers, then prints the power set of the resulting multiset.
 * Terminates at EOF or on malformed input.
 */
int main(void) {
    cout << "Enter the number of elements, followed by each element, to generate the power set." << endl;
    cout << "> ";
    vector<int>::size_type elems_no;
    while (cin >> elems_no) {
        vector<int> elems;
        elems.reserve(elems_no); /* one allocation instead of log(n) regrowths */
        bool input_ok = true;
        for (vector<int>::size_type i = 0; i < elems_no; i++) {
            int element;
            /* Check each extraction: on EOF or a non-integer token the
             * original code pushed an uninitialized int (UB). */
            if (!(cin >> element)) {
                input_ok = false;
                break;
            }
            elems.push_back(element);
        }
        if (!input_ok)
            break;
        powerset(elems);
        cout << "> ";
    }
    return 0;
}
Space complexity is not O(1)
- 010010.bin September 23, 2015