Alex
O(S^2 * N) time, where S is the sum of the input pipes' lengths, and N is the number of pipes.
#include <iostream>
#include <vector>
#include <unordered_set>
using namespace std;
// Recursively partitions the pipes a[idx..] between two groups (p1, p2),
// optionally skipping a pipe, and records in p1_out/p2_out the grouping with
// the largest equal total length (len_out). The (len1, len2, idx) memo keeps
// the search at O(S^2 * N), S being the total length of all pipes.
void LargestPipes(vector<int> const &a, vector<int> &p1, vector<int> &p2, vector<int> &p1_out,
vector<int> &p2_out, int16_t &len_out, unordered_set<uint64_t> &memo,
int16_t len1 = 0, int16_t len2 = 0, int idx = 0)
{
if (idx < 0 ||
idx > a.size())
{
return;
}
// Record an improvement before the end-of-array test so the state where
// every pipe has been placed (idx == a.size()) is also considered.
if (len1 == len2 &&
len1 > len_out)
{
len_out = len1;
p1_out = p1;
p2_out = p2;
}
if (idx == a.size()) {
return;
}
// Pack (len1, len2, idx) into one 64-bit key; assumes the lengths fit in
// 16 bits and idx in 32.
uint64_t memo_key = (static_cast<uint64_t>(len1) << 48) | (static_cast<uint64_t>(len2) << 32) | idx;
if (memo.find(memo_key) != memo.end()) {
return;
}
memo.insert(memo_key);
// Branch 1: a[idx] joins the first group.
p1.push_back(a[idx]);
LargestPipes(a, p1, p2, p1_out, p2_out, len_out, memo, len1 + a[idx], len2, idx + 1);
p1.pop_back();
// Branch 2: a[idx] joins the second group.
p2.push_back(a[idx]);
LargestPipes(a, p1, p2, p1_out, p2_out, len_out, memo, len1, len2 + a[idx], idx + 1);
p2.pop_back();
// Branch 3: a[idx] is used by neither group.
LargestPipes(a, p1, p2, p1_out, p2_out, len_out, memo, len1, len2, idx + 1);
}
// Demo driver: splits {1, 2, 3, 4, 6} into two groups of equal total length
// and prints both groups.
int main()
{
vector<int> p1, p2, p1_out, p2_out;
// Fix: the original left len_out uninitialized, so the first
// "len1 > len_out" comparison in LargestPipes read an indeterminate
// value (undefined behavior).
int16_t len_out = 0;
unordered_set<uint64_t> memo;
LargestPipes({1, 2, 3, 4, 6}, p1, p2, p1_out, p2_out, len_out, memo);
for (int n : p1_out) {
cout << n << ", ";
}
cout << "\n";
for (int n : p2_out) {
cout << n << ", ";
}
cout << "\n";
}
My idea is to find number of paths from each point (excluding the end point) to each other point. After that, number of paths from the start point to the end point would look like:
number_of_path(start->1) * number_of_path(1->2) * number_of_path(2->3) * number_of_path(3->end) +
number_of_path(start->2) * number_of_path(2->1) * number_of_path(1->3) * number_of_path(3->end) +
and so on, using all permutations of 1, 2, 3.
This can be improved by not generating permutations that are not viable. E.g. if checkpoint 2 is located above or to the left of checkpoint 1, it doesn't make sense to generate permutations containing 1, 2, since, we can't move up or left.
Update: actually, given the constraints (moving only right and down) there is only 1 (or 0) sequence of the checkpoints in which we can visit all of them. So, we can find that sequence (by sorting the checkpoints by row and column), e.g. start->1->2->3->end, and then find counts for the segments of the path and multiply the counts.
#include <iostream>
#include <vector>
#include <unordered_set>
#include <unordered_map>
using namespace std;
// Generates every permutation of the integers 1..size.
// Returns an empty result for size == 0.
class Perms {
public:
static vector<vector<int>> Get(int size)
{
unordered_set<int> remaining;
for (int v = 1; v <= size; ++v) {
remaining.insert(v);
}
vector<vector<int>> result;
vector<int> current;
PermsRec(remaining, current, result);
return result;
}
private:
// Extends `current` with every value still available in `remaining`.
static void PermsRec(unordered_set<int> &remaining, vector<int> &current, vector<vector<int>> &result)
{
if (remaining.empty()) {
// Skip the empty permutation produced when the initial set is empty.
if (!current.empty()) {
result.push_back(current);
}
return;
}
// Snapshot the candidates: `remaining` is mutated inside the loop.
vector<int> candidates(remaining.begin(), remaining.end());
for (int v : candidates) {
current.push_back(v);
remaining.erase(v);
PermsRec(remaining, current, result);
remaining.insert(v);
current.pop_back();
}
}
};
// A 2-d grid coordinate (row, column) stored in 16-bit fields.
class Point {
public:
Point(int r, int c) : r_(static_cast<int16_t>(r)), c_(static_cast<int16_t>(c)) {}
int16_t r_, c_;
};
// Packs the (row, col) coordinates of two points into one 64-bit key
// (16 bits per coordinate), used to index the point-to-point path counts.
uint64_t Key(int from_idx, int to_idx, vector<Point> const &p)
{
return (static_cast<uint64_t>(p[from_idx].r_) << 48) |
(static_cast<uint64_t>(p[from_idx].c_) << 32) |
(static_cast<uint64_t>(p[to_idx].r_) << 16) |
p[to_idx].c_;
}
// DP over the m x n grid: counts right/down-only paths from p[source_idx] to
// every cell, then records the counts to each point of interest in
// p_to_p_count under Key(source_idx, i, p).
void CountsFromPoint(int m, int n, vector<Point> const &p, int source_idx, unordered_map<uint64_t, uint64_t> &p_to_p_count)
{
vector<vector<uint64_t>> dp(m, vector<uint64_t>(n, 0));
auto &s = p[source_idx];
dp[s.r_][s.c_] = 1;
// Rows above the source stay all-zero, so iteration may start at s.r_;
// cells left of the source stay 0 because their neighbors are 0.
for (int r = s.r_; r < m; ++r) {
for (int c = 0; c < n; ++c) {
if (r - 1 >= 0) {
dp[r][c] += dp[r - 1][c];
}
if (c - 1 >= 0) {
dp[r][c] += dp[r][c - 1];
}
}
}
// Points unreachable from the source simply get a count of 0.
for (int i = 0; i < p.size(); ++i) {
p_to_p_count[Key(source_idx, i, p)] = dp[p[i].r_][p[i].c_];
}
}
uint64_t Count(int m, int n, Point source, Point target, vector<Point> const &checkpoints)
{
vector<Point> p;
p.push_back(source);
p.insert(p.end(), checkpoints.begin(), checkpoints.end());
p.push_back(target);
unordered_map<uint64_t, uint64_t> p_to_p_count;
for (int i = 0; i < p.size() - 1; ++i) {
CountsFromPoint(m, n, p, i, p_to_p_count);
}
auto perms = Perms::Get(checkpoints.size());
uint64_t total_count = 0;
for (auto &perm : perms) {
uint64_t count = p_to_p_count[Key(0, perm[0], p)];
for (int i = 0; i + 1 < perm.size() && count != 0; ++i) {
count *= p_to_p_count[Key(perm[i], perm[i + 1], p)];
}
count *= p_to_p_count[Key(perm[perm.size() - 1], p.size() - 1, p)];
total_count += count;
}
return total_count;
}
// Demo: paths from (0,0) to (3,3) on a 4x4 grid through checkpoints (1,1)
// and (2,2), moving only right or down.
int main()
{
cout << Count(4, 4, {0, 0}, {3, 3}, {{1, 1}, {2, 2}}) << "\n";
}
Below is the code that works for any set of elements.
If we should somehow use the knowledge that the elements are 1,2,..,9, maybe, we should pre-compute all of possible results and store them in "n,k" => sets hashmap.
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// Returns every subset of a[idx..] with exactly `k` elements summing to `n`.
// Elements are appended after the recursive call, so each returned set lists
// its members in reverse index order. `memo` caches results keyed by
// (k, idx, n) packed into a 64-bit value.
vector<vector<int>> SumSets(vector<int> const &a, int n, int16_t k,
unordered_map<uint64_t, vector<vector<int>>> &memo, int16_t idx = 0)
{
vector<vector<int>> out;
if (k < 0 ||
n < 0 ||
idx < 0)
{
return out;
}
if (k == 0 &&
n == 0)
{
// Exactly one way: the empty set. Checked before the bounds test so a
// solution completed at idx == a.size() is still counted.
out.resize(1);
return out;
}
// Fix: the original tested idx > a.size(), which let idx == a.size() fall
// through to the a[idx] access below (out-of-bounds read).
if (idx >= a.size()) {
return out;
}
uint64_t memo_key = (static_cast<uint64_t>(k) << 48) | (static_cast<uint64_t>(idx) << 32) | n;
auto it = memo.find(memo_key);
if (it != memo.end()) {
return it->second;
}
// Branch 1: take a[idx].
out = SumSets(a, n - a[idx], k - 1, memo, idx + 1);
for (auto &s : out) {
s.push_back(a[idx]);
}
// Branch 2: skip a[idx].
auto skip = SumSets(a, n, k, memo, idx + 1);
out.insert(out.end(), skip.begin(), skip.end());
memo[memo_key] = out;
return out;
}
// Demo: prints every 3-element subset of 1..9 that sums to 9.
int main()
{
unordered_map<uint64_t, vector<vector<int>>> memo;
auto out = SumSets({1, 2, 3, 4, 5, 6, 7, 8, 9}, 9, 3, memo);
for (auto &s : out) {
for (int n : s) {
cout << n << ", ";
}
cout << "\n";
}
}
// Post-order removal pass: detaches every node for which ShouldBeErased()
// holds and collects the roots of the surviving subtrees into `forest`.
// NOTE(review): Node and ShouldBeErased are declared elsewhere; erased nodes
// are only detached, never delete'd -- confirm the caller owns that memory.
void Erase(Node *n, vector<Node *> &forest, Node *parent = NULL)
{
if (n) {
// Children first, so a kept child of an erased parent is already in
// `forest` by the time the parent is detached.
Erase(n->left_, forest, n);
Erase(n->right_, forest, n);
if (ShouldBeErased(n)) {
// Unlink n from its parent; nothing above n changes.
if (parent) {
if (parent->left_ == n) {
parent->left_ = NULL;
} else {
parent->right_ = NULL;
}
}
} else if (parent == NULL ||
ShouldBeErased(parent))
{
// n survives but has no surviving parent (erased, or n is the
// root): n becomes the root of a new tree in the forest.
forest.push_back(n);
}
}
}
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// Builds a lookup table from "prefix|suffix" keys to the highest weight of
// any input word matching both affixes (queried via GetMax).
unordered_map<string, int> Preprocess(vector<pair<string, int>> const &a)
{
unordered_map<string, int> m;
for (auto &el : a) {
string const &w = el.first;
int val = el.second;
// Index every (prefix, suffix) pair, including empty and overlapping
// affixes (e.g. word "gogle", prefix "gog", suffix "gle"). The
// original started i at 1 and j at i + 1, which missed the empty and
// overlapping combinations.
for (int i = 0; i <= (int)w.size(); ++i) {
for (int j = 0; j <= (int)w.size(); ++j) {
string key = w.substr(0, i) + '|' + w.substr(j);
// Fix: the original special cases read m[w] (the bare word,
// never a valid key) instead of the entry being updated.
m[key] = max(m[key], val);
}
}
}
return m;
}
// Returns the weight stored for the given prefix/suffix pair, or -1 when no
// indexed word matches both affixes.
int GetMax(unordered_map<string, int> const &m, string const &pref, string const &suff)
{
string const key = pref + '|' + suff;
auto hit = m.find(key);
if (hit == m.end()) {
return -1;
}
return hit->second;
}
// Demo: both "google" (30) and "gogle" (20) match prefix "go" with suffix
// "le"; the larger weight is expected.
int main()
{
auto m = Preprocess({
{"google", 30},
{"gogle", 20},
});
cout << GetMax(m, "go", "le") << "\n";
}
O(n^2) time, O(n) space.
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// One trie node: children keyed by character plus an end-of-word flag.
class Node {
public:
Node()
{
terminal_ = false;
}
// Recursively frees the whole subtree.
~Node()
{
for (auto &entry : children_) {
delete entry.second;
}
children_.clear();
}
// Returns the child for `c`, creating it on first use.
Node *AddChild(char c)
{
Node *child = Child(c);
if (child != NULL) {
return child;
}
child = new Node();
children_[c] = child;
return child;
}
// Returns the child for `c`, or NULL when absent.
Node *Child(char c)
{
auto it = children_.find(c);
if (it == children_.end()) {
return NULL;
}
return it->second;
}
bool terminal_;
private:
unordered_map<char, Node *> children_;
};
// Thin wrapper around the root Node of a character trie.
class Trie {
public:
// Inserts s, marking the node of its last character as a word end.
void Add(string const &s)
{
Node *n = &root_;
for (char c : s) {
n = n->AddChild(c);
}
n->terminal_ = true;
}
Node *Root()
{
return &root_;
}
private:
Node root_;
};
// Counts how many ways s[idx..] can be decomposed into a concatenation of
// dictionary words stored in `trie`. memo[i] caches the count for the suffix
// s[i..] (-1 = not yet computed). The empty string yields 0.
int Count(Trie &trie, string const &s, vector<int> &memo, int idx = 0)
{
if (idx < 0 ||
idx > s.size())
{
return 0;
}
if (idx == s.size()) {
// An exhausted suffix completes one decomposition; the top-level call
// on an empty string returns 0 rather than 1.
return (idx == 0) ? 0 : 1;
}
// Lazily size the memo on the first call.
if (memo.size() < s.size()) {
memo.resize(s.size(), -1);
}
if (memo[idx] != -1) {
return memo[idx];
}
int count = 0;
// Walk the trie along s[idx..]; every terminal node marks a dictionary
// word ending at i, after which the rest decomposes independently.
Node *n = trie.Root();
for (int i = idx; i < s.size(); ++i) {
n = n->Child(s[i]);
if (!n) {
break;
}
if (n->terminal_) {
count += Count(trie, s, memo, i + 1);
}
}
memo[idx] = count;
return memo[idx];
}
// Demo: "dogeatscare" splits as dog|eat|scare and dog|eats|care -> prints 2.
int main()
{
Trie trie;
trie.Add("dog");
trie.Add("eat");
trie.Add("eats");
trie.Add("scare");
trie.Add("care");
vector<int> memo;
cout << Count(trie, "dogeatscare", memo) << "\n";
}
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// Prints the seating as consecutive [a, b] couples; a trailing odd element
// is ignored.
void Print(vector<int> const &a)
{
int const pairs = a.size() / 2;
for (int p = 0; p < pairs; ++p) {
cout << "[" << a[2 * p] << ", " << a[2 * p + 1] << "] ";
}
cout << "\n";
}
// Backtracking search for the minimum number of swaps that seats every
// couple together, processing the row two seats at a time. State:
//   a       - current seating (person ids), mutated and restored in place
//   seat    - person id -> current seat index
//   partner - person id -> partner's person id
//   swaps   - person id -> times that person was already swapped (each
//             person may be moved at most twice)
//   memo    - serialized tail state -> best answer for that tail
// Returns the minimum swap count for seats [idx..), or -1 when impossible.
int CountRec(vector<int> &a, unordered_map<int, int> &seat, unordered_map<int, int> &partner,
unordered_map<int, int> &swaps, unordered_map<string, int> &memo, int idx = 0)
{
if (idx < 0 ||
idx > a.size())
{
return -1;
}
if (idx == a.size()) {
// All pairs matched; print the final arrangement for inspection.
Print(a);
return 0;
}
// Memo key: the unsolved tail plus each person's used swap budget.
string memo_key;
for (int i = idx; i < a.size(); ++i) {
memo_key += to_string(a[i]) + "\t" + to_string(swaps[a[i]]) + "\n";
}
auto it = memo.find(memo_key);
if (it != memo.end()) {
return it->second;
}
// This couple is already seated together: nothing to do here.
if (partner[a[idx]] == a[idx + 1]) {
return CountRec(a, seat, partner, swaps, memo, idx + 2);
}
int min_count = numeric_limits<int>::max();
// Either bring the partner of a[idx + 1] into seat idx, or the partner of
// a[idx] into seat idx + 1.
vector<int> idxes = {idx, idx + 1};
for (int i : idxes) {
// j = seat of the partner of the person in the *other* seat of this pair.
int j = seat[partner[a[i == idx ? idx + 1 : idx]]];
if (swaps[a[i]] < 2 &&
swaps[a[j]] < 2)
{
// Swap the two people, keep `seat` consistent, and charge one swap
// to each of them.
swap(a[i], a[j]);
swap(seat[a[i]], seat[a[j]]);
++swaps[a[i]];
++swaps[a[j]];
int count = CountRec(a, seat, partner, swaps, memo, idx + 2);
if (count != -1) {
count++;
min_count = min(min_count, count);
}
// Backtrack in reverse order.
--swaps[a[i]];
--swaps[a[j]];
swap(seat[a[i]], seat[a[j]]);
swap(a[i], a[j]);
}
}
if (min_count == numeric_limits<int>::max()) {
min_count = -1;
}
memo[memo_key] = min_count;
return min_count;
}
// Entry point: builds the seat/partner/swaps tables and delegates to
// CountRec. Persons are the values 0..a.size()-1; persons 2i and 2i+1 form
// a couple. Returns -1 for an odd-sized input.
int Count(vector<int> a) {
if (a.size() % 2) {
return -1;
}
unordered_map<int, int> seat;
for (int i = 0; i < a.size(); ++i) {
seat[a[i]] = i;
}
// partner is keyed by person id, which coincides with the index range.
unordered_map<int, int> partner;
for (int i = 0; i < a.size(); i += 2) {
partner[i] = i + 1;
partner[i + 1] = i;
}
unordered_map<int, int> swaps;
unordered_map<string, int> memo;
return CountRec(a, seat, partner, swaps, memo);
}
// Fisher-Yates shuffle driven by rand().
void Shuffle(vector<int> &a)
{
int const size = a.size();
for (int i = 0; i < size; ++i) {
int const remaining = size - i;
int const pick = i + rand() % remaining;
swap(a[i], a[pick]);
}
}
// Demo: shuffles the person ids 0..19 and reports the minimum swaps needed
// to reunite all couples (-1 if impossible under the 2-swaps-per-person cap).
int main()
{
srand(time(NULL));
int size = 10 * 2;
vector<int> a;
while (a.size() < size) {
a.push_back(a.size());
}
Shuffle(a);
Print(a);
cout << Count(a) << "\n";
}
We can scan the strings and whenever we see a different char, build two arrays of diff. If the diff arrays become larger than 2, return false. If at the end of the scan the diff arrays contain 2 elements and swapping 2 elements in one diff array makes the diff array equal, return true.
The code below is for the follow up question.
#include <iostream>
#include <vector>
#include <unordered_map>
#include <unordered_set>
using namespace std;
// Recursive helper for Swappable: starting at `idx`, finds the first position
// where s1 and s2 differ and tries every swap inside s2 that fixes both that
// position and the swapped-with position. `s2_char_to_swap_idxes` maps a
// character to the mismatched s2 positions holding it; entries are erased and
// re-inserted around the recursion, so passing by reference is safe (the
// original copied the entire map on every call).
bool SwappableRec(string &s1, string &s2, unordered_map<char, unordered_set<int>> &s2_char_to_swap_idxes, int idx = 0)
{
for (int i = idx; i < s1.size(); ++i) {
if (s1[i] != s2[i]) {
auto it = s2_char_to_swap_idxes.find(s1[i]);
if (it != s2_char_to_swap_idxes.end()) {
// Snapshot: the set is mutated inside the loop.
vector<int> idxes(it->second.begin(), it->second.end());
for (int j : idxes) {
if (j > i &&
s2[i] == s1[j])
{
// The swap fixes positions i and j simultaneously.
swap(s2[i], s2[j]);
it->second.erase(j);
// Resume after i: positions idx..i now match (the
// original restarted at idx + 1 and re-scanned them).
if (SwappableRec(s1, s2, s2_char_to_swap_idxes, i + 1)) {
return true;
}
it->second.insert(j);
swap(s2[i], s2[j]);
}
}
}
// The first mismatch cannot be fixed: this branch is dead.
return false;
}
}
return true;
}
// Returns true when s2 can be turned into s1 via swaps inside s2, where each
// swap must fix two mismatched positions at once. Identical strings are
// rejected by design (at least one swap is expected).
bool Swappable(string s1, string s2)
{
if (s1.size() != s2.size() ||
s1 == s2)
{
return false;
}
// For every mismatched position, remember where each s2 character sits;
// these are the only candidate swap targets.
unordered_map<char, unordered_set<int>> s2_char_to_swap_idxes;
for (int i = 0; i < s1.size(); ++i) {
if (s1[i] != s2[i]) {
s2_char_to_swap_idxes[s2[i]].insert(i);
}
}
return SwappableRec(s1, s2, s2_char_to_swap_idxes);
}
int main()
{
cout << Swappable("abcde", "decab") << "\n";
}
O(m * n) time.
The follow up question is similar to the CareerCup site question having id=5121790555717632 (would be cool to allow links to the website itself).
#include <iostream>
#include <vector>
using namespace std;
// Returns true when no three equal colors appear consecutively in any row or
// any column of the board.
bool Valid(vector<vector<char>> const &m)
{
for (int r = 0; r < m.size(); ++r) {
for (int c = 0; c < m[r].size(); ++c) {
char const color = m[r][c];
// Three in a column: both cells directly above match.
if (r >= 2 &&
m[r - 1][c] == color &&
m[r - 2][c] == color)
{
return false;
}
// Three in a row: both cells directly to the left match.
if (c >= 2 &&
m[r][c - 1] == color &&
m[r][c - 2] == color)
{
return false;
}
}
}
return true;
}
// Demo board with no three identical colors in a row or column -> prints 1.
int main()
{
vector<vector<char>> m = {
{'R', 'G', 'R', 'B'},
{'R', 'Y', 'G', 'R'},
{'G', 'B', 'Y', 'Y'},
{'Y', 'G', 'B', 'G'},
{'B', 'R', 'Y', 'B'},
};
cout << Valid(m) << "\n";
}
For arrays we can use a trie (the code below). O(n^2 + m * n) time and O(min(n, m)^2) space.
In case of a 2d array, I assume we are looking for the largest common rectangle. We can use sum of the rectangle values as a hash. For each cell, we can find the sum of values of rectangle with upper left corner 0,0, and lower right corner at the cell, for O(n*m) time. Then, we iterate through each rectangle in Matrix 1 in O(n1*m1*min(n1, m1)) time. For each rectangle we find sum of its values in O(1) time (using the precomputed sums of rectangles starting from 0,0), and append a hashmap at key = sum with coordinates of the rectangle.
In Matrix 2 we iterate through all rectangles and get from the hashmap possible matches.
The worst case time is O(m1 * n1 * min(m1, n1) * n2 * m2), but the expected time feels like O(m1 * n1 * min(m1, n1)).
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// Trie node keyed by int values; owns and frees its children.
class Node {
public:
~Node()
{
for (auto &entry : children_) {
delete entry.second;
}
}
// Returns the child stored under `k`, or NULL when absent.
Node *Child(int k) const
{
auto it = children_.find(k);
if (it == children_.end()) {
return NULL;
}
return it->second;
}
// Returns the child under `k`, allocating it on first access.
Node *AddChild(int k)
{
Node *child = Child(k);
if (child == NULL) {
child = new Node();
children_[k] = child;
}
return child;
}
private:
unordered_map<int, Node *> children_;
};
// Thin wrapper exposing the root of an int-keyed trie; insertion is done by
// callers through Node::AddChild.
class Trie {
public:
Node *Root()
{
return &root_;
}
private:
Node root_;
};
// Finds the longest contiguous subarray of a1 that also occurs in a2.
// Builds a trie of every suffix of a2 (O(n^2) nodes), then walks each suffix
// of a1 through it. Returns the [first, last] index range in a1 of the best
// match ((0, 0) when nothing matches).
pair<int, int> LargestCommonSubarray(vector<int> const &a1, vector<int> const &a2)
{
Trie trie;
// Insert every suffix a2[i..]: any subarray of a2 is then a root path.
for (int i = 0; i < a2.size(); ++i) {
Node *n = trie.Root();
for (int j = i; j < a2.size(); ++j) {
n = n->AddChild(a2[j]);
}
}
pair<int, int> out;
int max_size = 0;
// For each start in a1, match the longest prefix of a1[i..] in the trie.
for (int i = 0; i < a1.size(); ++i) {
Node *n = trie.Root();
int size = 0;
for (int j = i; j < a1.size(); ++j) {
n = n->Child(a1[j]);
if (!n) {
break;
}
if (++size > max_size) {
max_size = size;
out = pair<int, int>(i, i + size - 1);
}
}
}
return out;
}
// Demo: the longest common subarray {2, 3, 4, 5} sits at a1[1..4] -> "1 -> 4".
int main()
{
auto out = LargestCommonSubarray({1, 2, 3, 4, 5}, {2, 3, 4, 5, 6});
cout << out.first << " -> " << out.second << "\n";
}
#include <iostream>
using namespace std;
// Binary expression-tree node: val_ is either an operator or a digit.
class Node {
public:
Node(string val) : val_(val), left_(NULL), right_(NULL) {}
string val_;
Node *left_, *right_;
};
// Serializes the expression tree in-order. Operands of '*' and '/' are
// parenthesized to preserve evaluation order; '+'/'-' children are not.
string Expression(Node *n)
{
if (!n) {
return "";
}
bool brackets = (n->val_ == "*" || n->val_ == "/");
string ex;
ex += (brackets ? "(" : "") + Expression(n->left_) + (brackets ? ")" : "");
ex += n->val_;
ex += (brackets ? "(" : "") + Expression(n->right_) + (brackets ? ")" : "");
return ex;
}
// Rebuilds a tree from a space-separated pre-order string where every token
// is one character: a digit is a leaf, anything else an operator with two
// children. idx advances past the consumed characters.
// NOTE(review): nodes are heap-allocated and never freed by the caller here.
Node *Deserialize(string const &s, int &idx)
{
// Skip inter-token spaces.
while (idx < s.size() &&
s[idx] == ' ')
{
++idx;
}
if (idx >= s.size()) {
return NULL;
}
Node *n = new Node(string() + s[idx++]);
// Operators recurse for both operands; digits are leaves.
if (!isdigit(n->val_[0])) {
n->left_ = Deserialize(s, idx);
n->right_ = Deserialize(s, idx);
}
return n;
}
// Demo: prints the hand-built tree (1+2)*(3-4), then round-trips the same
// expression through Deserialize ("* + 12-34" is its pre-order form).
int main()
{
Node n1("*"), n2("+"), n3("-"), n4("1"), n5("2"), n6("3"), n7("4");
n1.left_ = &n2;
n1.right_ = &n3;
n2.left_ = &n4;
n2.right_ = &n5;
n3.left_ = &n6;
n3.right_ = &n7;
cout << Expression(&n1) << "\n";
int idx = 0;
Node *root = Deserialize("* + 12-34", idx);
cout << Expression(root) << "\n";
}
I assume I eat a red bean only if I pick 2 red beans in a row.
For the case when there are only 2 beans in the jar, 1 white and 1 red, it's not difficult to derive the probabilities by considering all possible scenarios:
1. We pick the white and eat it. Probability 1/2. The red is the last.
2. We pick the red, place it back, and pick the red again, and eat it. Probability 1/4. The white is the last.
3. We pick the red, place it back, and pick the white, and eat it. Probability 1/4. The red is the last.
So, the probability of the white to be the last bean is 1/4, and probability of the red bean to be the last is 3/4 (1/2 + 1/4).
For the other amounts of white and red beans (like 3 white and 5 red), it doesn't look easy to derive a formula including w and r. The code simulates the process and calculates the probability for arbitrary amounts of w and r.
// Probability that the last bean left in the jar is white, starting with `w`
// white and `r` red beans. Rules: a picked white bean is eaten; a picked red
// bean goes back into the jar unless the previous pick was also red, in
// which case it is eaten. `prev_red` says whether the previous pick was red.
// Memoized on (w, r, prev_red).
double LastBeanIsWhiteProbability(int w, int r, unordered_map<uint64_t, double> &memo, bool prev_red = false)
{
if (r < 0 ||
w < 0)
{
return 0;
}
if (r == 0) {
// Only white beans remain, so the last bean must be white.
return 1;
}
if (w == 0) {
// Only red beans remain.
return 0;
}
uint64_t const memo_key = (static_cast<uint64_t>(w) << 48) | (static_cast<uint64_t>(r) << 32) | prev_red;
auto cached = memo.find(memo_key);
if (cached != memo.end()) {
return cached->second;
}
double const total = r + w;
// Pick a white bean (eaten): the red streak resets.
double const white_branch = (w / total) * LastBeanIsWhiteProbability(w - 1, r, memo, false);
// Pick a red bean: eaten only if the previous pick was red too.
double const red_branch = prev_red
? (r / total) * LastBeanIsWhiteProbability(w, r - 1, memo, false)
: (r / total) * LastBeanIsWhiteProbability(w, r, memo, true);
double const prob = white_branch + red_branch;
memo[memo_key] = prob;
return prob;
}
@zyfo2. Regarding moving complexity to run time. E.g.
Bus1: abcde
Bus2: abcdf
Bus3: abcdg
The graph is:
e
abcd/-f
\g
The user departs from a. We have to check what lines are stored at node a, and for each of the lines, we add a new node in BFS: (a/Bus 1, a/Bus2, a/Bus3). So, we have effectively split the graph, but at run time.
I see. It may be looking redundant for bus stops which don't have transfer paths physically. Train stations with transfer tunnels may be better examples. And it's not necessary we have to connect nodes at the station each to each. We can just model those tunnels, e.g. a circular tunnel that has platforms to many train lines.
@zyfo2. Regarding (3)->(13) and (4)->(14). That's correct. Earlier I edited my post a few times, and added a note about the links.
Station to nodes mapping is not used while traversal. It takes some memory, but not a lot.
That's correct. It's a complex graph. But in real life it's of the same complexity.
I see that your solution is similar to mine and better regarding space, but the complexity isn't eliminated by it. The great complexity of transit graphs is just moved to the run time. The traversal algorithm effectively transforms your graph to mine. So, I think it's worth to keep the graph a bit more complex (and closer to reality) but keep the algorithm as simple as possible.
@zyfo2. I'm not sure why the example won't work in my solution. It works every time in real life, when I change buses :)
Input:
Bus 1: abcde
Bus 2: fgcdh
Bus 3: ijcdkl
Graph:
(1)->(2)->(3)->(4)->(5)
| |
(6)->(7)->(8)->(9)->(10)
\ \
(11)->(12)->(13)->(14)->(15)
A note: there are also links (3)->(13), (4)->(14).
Station to node mapping:
a=>(1)
b=>(2)
c=>(3), (8), (13)
d=>(4), (9), (14)
e=>(5)
f=>(6)
g=>(7)
h=>(10)
i=>(11)
j=>(12)
k=>(14)
l=>(15)
The issue of your solution (when we don't split the station for each line, and keep several lines in a node link), is that if we traverse the graph as in real life (when the user rides only one bus at the current moment), while traversing we have to effectively split the multi-line station into several branches. If we traverse pretending the user rides several lines at the current moment, we have to find intersection of several lines every time we move to the next stations.
The code below supports duplicates.
#include <iostream>
#include <vector>
using namespace std;
// A value paired with its original array index; ordered by value, with the
// index as a tie-breaker so equal values keep a deterministic order.
class Element {
public:
Element(int val, int idx) : val_(val), idx_(idx) {}
bool operator<(Element const &other) const
{
if (val_ != other.val_) {
return val_ < other.val_;
}
return idx_ < other.idx_;
}
int val_, idx_;
};
// O(n log n) check that a1 and a2 share the same relative-order pattern.
// Sorting (value, index) pairs makes the index sequences comparable; the
// equal-value check distinguishes ties from strict order.
// NOTE(review): relies on std::sort through a transitive include --
// <algorithm> is not included explicitly in this file.
bool SamePattern(vector<int> const &a1, vector<int> const &a2)
{
if (a1.empty() ||
a1.size() != a2.size())
{
return false;
}
vector<Element> e1, e2;
for (int n : a1) {
e1.push_back(Element(n, e1.size()));
}
for (int n : a2) {
e2.push_back(Element(n, e2.size()));
}
sort(e1.begin(), e1.end());
sort(e2.begin(), e2.end());
for (int i = 0; i < e1.size(); ++i) {
// The i-th smallest element must come from the same position in both.
if (e1[i].idx_ != e2[i].idx_) {
return false;
}
// Equal-value runs must align, otherwise a tie in one array would be
// conflated with a strict order in the other.
if (i > 0) {
if ((e1[i - 1].val_ == e1[i].val_) != (e2[i - 1].val_ == e2[i].val_)) {
return false;
}
}
}
return true;
}
// O(n^2) reference check: the arrays share the same ordering pattern when
// every index pair compares the same way (<, >, ==) in both. Empty or
// different-length inputs are rejected.
bool BruteForce(vector<int> const &a1, vector<int> const &a2)
{
if (a1.size() != a2.size() ||
a1.empty())
{
return false;
}
for (int i = 0; i < a1.size(); ++i) {
for (int j = i + 1; j < a1.size(); ++j) {
bool const gt_mismatch = (a1[i] > a1[j]) != (a2[i] > a2[j]);
bool const lt_mismatch = (a1[i] < a1[j]) != (a2[i] < a2[j]);
if (gt_mismatch || lt_mismatch) {
return false;
}
}
}
return true;
}
// Randomized cross-check: SamePattern must agree with the O(n^2) BruteForce
// on a million small random inputs.
int main()
{
srand(time(NULL));
for (int i = 0; i < 1000000; ++i) {
int size = rand() % 10;
vector<int> a1, a2;
for (int j = 0; j < size; ++j) {
a1.push_back(rand() % 10);
a2.push_back(rand() % 10);
}
if (BruteForce(a1, a2) != SamePattern(a1, a2)) {
cerr << "mismatch\n";
return -1;
}
}
cout << "ok\n";
}
O(n) time and space.
#include <iostream>
#include <unordered_set>
#include <unordered_map>
using namespace std;
// Counts the ways to descend from `step` to step 0 without landing on a
// sticky step. A jump of 1 or 4 is always allowed; a jump of 2 only on
// odd-numbered moves and a jump of 3 only on even-numbered ones (`odd_move`
// tracks the parity). Memoized on (step, odd_move); the memo must not be
// shared across different sticky-step sets.
uint64_t Count(unordered_set<int> const &sticky_steps, unordered_map<uint64_t, uint64_t> &memo,
int step, bool odd_move = true)
{
// Below ground, or standing on a sticky step: dead end.
if (step < 0 ||
sticky_steps.count(step) > 0)
{
return 0;
}
if (step == 0) {
// Reached the ground: one complete way.
return 1;
}
uint64_t const memo_key = (static_cast<uint64_t>(step) << 32) | odd_move;
auto cached = memo.find(memo_key);
if (cached != memo.end()) {
return cached->second;
}
int const parity_jump = odd_move ? 2 : 3;
uint64_t const count =
Count(sticky_steps, memo, step - 1, !odd_move) +
Count(sticky_steps, memo, step - 4, !odd_move) +
Count(sticky_steps, memo, step - parity_jump, !odd_move);
memo[memo_key] = count;
return count;
}
// Demo: ways to descend 7 steps when step 3 is sticky.
int main()
{
unordered_set<int> const sticky_steps = {3};
unordered_map<uint64_t, uint64_t> memo;
cout << Count(sticky_steps, memo, 7) << "\n";
}
BFS.
#include <iostream>
#include <vector>
#include <queue>
using namespace std;
// Multi-source BFS from every guard 'G'. Each open room '.' is overwritten
// with 256 + its BFS distance to the nearest guard; walls '#' are untouched.
void SecurityCheck(vector<vector<int>> &m)
{
queue<pair<int, int>> q1, q2;
auto parents = &q1;
auto children = &q2;
// Seed the frontier with all guard positions.
for (int r = 0; r < m.size(); ++r) {
for (int c = 0; c < m[r].size(); ++c) {
if (m[r][c] == 'G') {
parents->push(pair<int, int>(r, c));
}
}
}
// Rooms are stamped with 256 + distance so stored values never collide
// with the original char codes ('#', '.', 'G' are all below 256).
int level = 256;
bool first_wave = true;
while (!parents->empty()) {
while (!parents->empty()) {
auto room = parents->front();
parents->pop();
int r = room.first;
int c = room.second;
if (r >= 0 &&
r < m.size() &&
c >= 0 &&
c < m[r].size() &&
// Fix: expand 'G' cells only in the seeding wave. The original
// re-expanded a guard whenever a neighbor pushed it back, so
// two adjacent guards kept re-adding each other forever and
// the loop never terminated.
(m[r][c] == '.' || (m[r][c] == 'G' && first_wave)))
{
if (m[r][c] == '.') {
m[r][c] = level;
}
children->push(pair<int, int>(r + 1, c));
children->push(pair<int, int>(r - 1, c));
children->push(pair<int, int>(r, c + 1));
children->push(pair<int, int>(r, c - 1));
}
}
first_wave = false;
++level;
swap(parents, children);
}
}
// Prints the grid: values below 256 as their original characters, larger
// values as the guard distance (value - 256).
void Print(vector<vector<int>> const &m)
{
for (auto const &row : m) {
for (int cell : row) {
if (cell >= 256) {
cout << cell - 256;
} else {
cout << static_cast<char>(cell);
}
cout << " ";
}
cout << "\n";
}
}
// Demo: 5x5 museum with walls '#' and guards 'G'; prints each room's
// distance to the nearest guard.
int main()
{
vector<vector<int>> m = {
{'#', '.', '.', '.', '.'},
{'.', '.', '#', '.', '.'},
{'.', 'G', '.', '#', '#'},
{'.', '#', '.', 'G', '.'},
{'.', '.', '.', '.', '.'}
};
SecurityCheck(m);
Print(m);
}
@zyfo2. It's definitely possible to use BFS in here, but the model in this case is far from reality. E.g. what if the transfer from Bus 1 to Bus 2 is open only from 1pm to 3pm. In the model when station c is represented by a single node, it would be a lot of logic to support this.
Also, if the data looks like this:
Bus 1: abcde
Bus 2: fgcdh
Bus 3: ijcdkl
, which of the bus lines will be stored in the nodes c an d? Or in the link between c and d?
#include <iostream>
#include <unordered_map>
using namespace std;
// One pseudo-random bit derived from rand().
bool Rand2()
{
return (rand() & 1) != 0;
}
// Uniform random integer in [a, b], built bit by bit from Rand2() with
// rejection sampling so every value is equally likely.
// NOTE(review): assumes b >= a; b < a makes max_val wrap around -- confirm
// callers guarantee the ordering.
uint32_t RandAB(uint32_t a, uint32_t b)
{
uint32_t const max_val = b - a;
// Number of bits needed to represent max_val.
int bits = 0;
for (uint32_t probe = max_val; probe != 0; probe >>= 1) {
++bits;
}
uint32_t rnd;
do {
rnd = 0;
for (int i = 0; i < bits; ++i) {
rnd = (rnd << 1) | (Rand2() ? 1u : 0u);
}
} while (rnd > max_val);
return a + rnd;
}
// Histogram sanity check for RandAB over [3, 14] (100M samples).
// NOTE(review): the output prints el.second before el.first, i.e.
// "count=>value" -- looks swapped relative to the usual "value=>count";
// confirm which order was intended.
int main()
{
srand(time(NULL));
unordered_map<uint32_t, int> stats;
for (int i = 0; i < 100000000; ++i) {
++stats[RandAB(3, 14)];
}
for (auto &el : stats) {
cout << el.second << "=>" << el.first << "\n";
}
}
#include <iostream>
#include <vector>
using namespace std;
// Plain binary-tree node holding an int value.
class Node {
public:
Node(int val) : val_(val), left_(NULL), right_(NULL) {}
int val_;
Node *left_, *right_;
};
// A candidate answer: the node sequence of a path plus the sum of its
// values. sum_ starts at INT_MIN as a "no path yet" sentinel.
class Path {
public:
Path()
{
sum_ = numeric_limits<int>::min();
}
int sum_;
vector<Node *> path_;
};
// Computes in `out` the maximum-sum path (any node to any node through a
// common ancestor). The return value is the best *downward* chain starting
// at n, consumed by the parent's computation.
// NOTE(review): Path objects (including their node vectors) are copied at
// every level, so this is O(n^2) in the worst case.
Path MaxSumPath(Node *n, Path &out)
{
if (!n) {
return Path();
}
Path l = MaxSumPath(n->left_, out);
Path r = MaxSumPath(n->right_, out);
// Best path *through* n: n plus whichever child chains help (only chains
// with a positive sum improve the total).
int sum = n->val_;
if (l.sum_ > 0) {
sum += l.sum_;
}
if (r.sum_ > 0) {
sum += r.sum_;
}
if (sum > out.sum_) {
out.sum_ = sum;
out.path_.clear();
// Assemble leaf-to-leaf order: left chain ends at n's left child, then
// n, then the right chain reversed (n's right child down to its leaf).
if (l.sum_ > 0) {
out.path_.insert(out.path_.end(), l.path_.begin(), l.path_.end());
}
out.path_.push_back(n);
if (r.sum_ > 0) {
for (int i = r.path_.size() - 1; i >= 0; --i) {
out.path_.push_back(r.path_[i]);
}
}
}
// Downward chain to return: n alone, or n extended by the better child
// chain when that chain's sum is non-negative.
Path p = l.sum_ > r.sum_ ? l : r;
if (p.sum_ < 0) {
p = Path();
}
p.sum_ = (p.sum_ == numeric_limits<int>::min()) ? n->val_ : p.sum_ + n->val_;
p.path_.push_back(n);
return p;
}
// Demo: expected output is 30 with path 6->3->2->4->7->8 (both ends are
// leaves joined through node 2).
int main()
{
/*
(1)
/
(2)
/ \
(3) (4)
/ /
(6) (7)
/ \
(5) (8)
*/
Node n1(1);
Node n2(2);
Node n3(3);
Node n4(4);
Node n5(5);
Node n6(6);
Node n7(7);
Node n8(8);
n1.left_ = &n2;
n2.left_ = &n3;
n2.right_ = &n4;
n3.left_ = &n6;
n4.left_ = &n7;
n7.left_ = &n5;
n7.right_ = &n8;
Path path;
MaxSumPath(&n1, path);
cout << path.sum_ << ": ";
for (Node *n : path.path_) {
cout << n->val_ << "->";
}
cout << "\n";
}
E.g.
0, 1, 1,
0, 1, 1,
0, 0, 0,
--- Flip(0, 1)
1, 0, 0,
0, 0, 1,
0, 1, 0,
--- Flip(1, 1)
1, 1, 0,
1, 1, 0,
0, 0, 0,
--- Flip(2, 1)
1, 0, 0,
1, 0, 0,
1, 1, 1,
--- Flip(2, 0)
0, 0, 0,
0, 0, 0,
0, 0, 0,
Brute force:
#include <iostream>
#include <vector>
#include <unordered_set>
using namespace std;
// Toggles every cell of row r and column c. The pivot (r, c) is flipped
// three times (with the column, with the row, and once more), so it ends up
// toggled exactly once like every other affected cell.
void Flip(vector<vector<bool>> &m, int r, int c)
{
for (int row = 0; row < m.size(); ++row) {
m[row][c] = !m[row][c];
}
for (int col = 0; col < m[r].size(); ++col) {
m[r][col] = !m[r][col];
}
m[r][c] = !m[r][c];
}
// Returns true when repeated Flip operations can turn the whole matrix into
// zeros. `seen` records visited states (rows concatenated as raw 0/1 chars)
// to cut cycles. On success the matrix is left in its all-zeros state; on
// failure it is restored.
bool Zeroable(vector<vector<bool>> &m, unordered_set<string> &seen)
{
if (m.empty() ||
m[0].empty())
{
return false;
}
// Serialize the state and count zeros in a single pass.
int zeros = 0;
string key;
for (auto &row : m) {
for (bool bit : row) {
if (!bit) {
++zeros;
}
key += bit;
}
}
if (zeros == m.size() * m[0].size()) {
return true;
}
// insert().second is false when the state was already explored.
if (!seen.insert(key).second) {
return false;
}
// Try every pivot; Flip is its own inverse, so flipping again backtracks.
for (int r = 0; r < m.size(); ++r) {
for (int c = 0; c < m[r].size(); ++c) {
Flip(m, r, c);
if (Zeroable(m, seen)) {
return true;
}
Flip(m, r, c);
}
}
return false;
}
// Prints the boolean matrix, one row per line, followed by a separator.
void Print(vector<vector<bool>> const &m)
{
for (auto const &row : m) {
for (bool bit : row) {
cout << bit << ", ";
}
cout << "\n";
}
cout << "---\n";
}
// Endless fuzz: generates random 4x3 boolean matrices and prints every one
// that can be flipped to all zeros. NOTE: runs forever by design.
int main()
{
srand(time(NULL));
int n = 3;
int m = 4;
while (true) {
vector<vector<bool>> matrix(m, vector<bool>(n));
for (int r = 0; r < matrix.size(); ++r) {
for (int c = 0; c < matrix[r].size(); ++c) {
matrix[r][c] = rand() % 2;
}
}
// Zeroable mutates `matrix`, so keep a copy for printing.
auto orig = matrix;
unordered_set<string> seen;
if (Zeroable(matrix, seen)) {
Print(orig);
}
}
}
To scale, we can shard the data. E.g. one server contains QueryVector object for all words beginning with "a"-"ad", the second server - "ad"-"ag", ..., the last server - "zu"-"zz". The server which gets the query can do simultaneous requests to the servers containing the request words, and merge the results.
Out of the following input:
Bus 1: a->b->c->d
Bus 2: c->e->f->g
we can create two independent chains of nodes:
(1)->(2)->(3)->(4)
(5)->(6)->(7)->(8)
Weights of all the edges (1->2, 2->3, ..., 7->8) gets set to 0.
Then, since we know that station c is the source of two nodes, we connect the nodes with a transfer link 3->5:
(1)->(2)->(3)->(4)
...................\
.....................(5)->(6)->(7)->(8)
The cost of the transfer link gets set to 1.
After that, we do Dijkstra expansion in the graph. Or bi-directional Dijkstra, if the graph is not time-dependent.
Regarding storage optimization. We can compress the lists of indexes. E.g. we can use augments instead of absolute values, something like writing 38447347 (32 bits), +1(2 bits), +1 (2 bits), instead of 38447347 (32 bits), 38447348 (32 bits), 38447349 (32 bits).
#include <vector>
#include <iostream>
#include <unordered_map>
using namespace std;
// Inverted index over a word list: maps each word to every position where it
// occurs, preserving ascending position order.
class QueryVector {
public:
QueryVector(vector<string> const &words)
{
int pos = 0;
for (auto const &w : words) {
index_[w].push_back(pos);
++pos;
}
}
// Concatenates the stored positions of each queried word; unknown words
// contribute nothing.
vector<int> GetIndices(vector<string> const &words)
{
vector<int> out;
for (auto const &w : words) {
auto hit = index_.find(w);
if (hit == index_.end()) {
continue;
}
out.insert(out.end(), hit->second.begin(), hit->second.end());
}
return out;
}
private:
unordered_map<string, vector<int>> index_;
};
// Demo: each output line lists the positions of all query words present in
// the indexed vector.
int main()
{
QueryVector qv({"an", "apple", "day", "keeps", "doctor", "away", "apple", "iphones", "cool", "doctor", "recommends"});
vector<vector<string>> in = {
{"apple"},
{"doctor", "cool"},
{"random"},
{"random","keeps","day","doctor"}
};
for (auto &a : in) {
for (int i : qv.GetIndices(a)) {
cout << i << ", ";
}
cout << "\n";
}
}
I'd do something like this:
1. Store the intervals as nodes of a doubly-linked list. Each node contains from_, to_, val_, id_. id_ is uint32_t, unique among the nodes.
2. For each node in the linked list, dump into a min heap abs difference of the node's value and the node's next node's value. The min heap's element consists of the difference and uint64_t id, which is built out of the 2 linked list nodes' ids.
3. The min heap is updatable. I.e. it's possible to update or remove a node of it by id.
4. Get the top element from the heap, parse id of the element into ids of 2 linked list nodes, get pointers to the nodes using the ids.
5. Merge the 2 nodes, update value of the first node with average of the node's values, remove the second node from the linked list.
6. Update the min heap. I.e. remove the heap node with id equal to "second_linked_list_node->next_to_second_linked_list_node", update value of the heap node with id equal to "prev_to_first_linked_list_node->first_linked_list_node", and insert a new heap node, having id "first_linked_list_node->next_to_second_linked_list_node".
I assume the node is a local minimum node, if the node's value is less than the node's parent's value, and less than the node's children's values.
// Collects every node whose value is strictly smaller than its parent's and
// both children's values; missing neighbors impose no constraint.
// NOTE(review): Node is declared elsewhere; assumed to expose val_, left_
// and right_.
void LocalMinNodes(Node *n, vector<Node *> &out, Node *parent = NULL)
{
if (n) {
if ((!parent || n->val_ < parent->val_) &&
(!n->left_ || n->val_ < n->left_->val_) &&
(!n->right_ || n->val_ < n->right_->val_))
{
out.push_back(n);
}
LocalMinNodes(n->left_, out, n);
LocalMinNodes(n->right_, out, n);
}
}
My idea is to build a directed acyclic graph and do DFS searches from all nodes that don't have inbound connections.
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// DAG node for one distinct word; children are words derived by inserting a
// single character, has_parents_ marks nodes that are not chain heads.
class Node {
public:
Node(string const &word) : word_(word), has_parents_(false) {}
string word_;
bool has_parents_;
vector<Node *> children_;
};
// DFS that records in `out` the longest chain found from n downwards;
// `path` is the current DFS stack. The graph is acyclic because every edge
// goes from a word to a word one character longer, so no visited set is
// needed.
void LongestPath(Node *n, vector<Node *> &out, vector<Node *> &path)
{
if (n) {
path.push_back(n);
if (path.size() > out.size()) {
out = path;
}
for (Node *c : n->children_) {
LongestPath(c, out, path);
}
path.pop_back();
}
}
// Finds the longest chain of input words where each next word is formed by
// inserting one character into the previous word. Builds a DAG (edge: word
// -> word with one extra character) and runs LongestPath from every word
// that has no shorter predecessor in the input.
vector<string> LongestList(vector<string> const &words)
{
// One node per distinct word (duplicates collapse).
unordered_map<string, Node *> word_to_node;
for (auto &w : words) {
Node *n = word_to_node[w];
if (!n) {
word_to_node[w] = n = new Node(w);
}
}
// For each word, try deleting every single character; if the shorter word
// exists in the input, it becomes this word's predecessor.
for (auto &el : word_to_node) {
Node *n = el.second;
string const &w = n->word_;
for (int i = 0; i < w.size(); ++i) {
string parent_word = w.substr(0, i) + (i + 1 < w.size() ? w.substr(i + 1) : "");
auto it = word_to_node.find(parent_word);
if (it != word_to_node.end()) {
Node *parent = it->second;
parent->children_.push_back(n);
n->has_parents_ = true;
}
}
}
// Only chain heads (words with no predecessor) can start a longest path.
vector<Node *> longest_path;
for (auto &el : word_to_node) {
Node *n = el.second;
if (!n->has_parents_) {
vector<Node *> out, path;
LongestPath(n, out, path);
if (out.size() > longest_path.size()) {
longest_path = out;
}
}
}
vector<string> longest_list;
for (Node *n : longest_path) {
longest_list.push_back(n->word_);
}
// Free every node before returning.
for (auto &el : word_to_node) {
delete el.second;
}
return longest_list;
}
int main()
{
for (auto &w : LongestList({"rgiving", "gving", "orgiving", "ging", "giving", "forgiving", "i", "in", "ing", "sing", "sting", "string"})) {
cout << w << "->";
}
cout << "\n";
}
DFS, starting from each company that was not acquired.
#include <iostream>
#include <vector>
#include <stack>
#include <unordered_map>
using namespace std;
// Vertex of the acquisition forest: a company id, the companies it acquired
// (children_), and the company that acquired it (parent_, null for roots).
class Node {
public:
    // Member-initializer list and nullptr instead of body assignments / NULL.
    Node(std::string id) : id_(id), parent_(nullptr) {}
    std::vector<Node *> children_; // companies this one acquired (not owned)
    std::string id_;               // company name
    Node *parent_;                 // acquirer, or nullptr if never acquired
};
// Finds the longest root-to-leaf path via an explicit DFS stack (tracking the
// deepest node seen), then rebuilds the path by walking parent_ pointers up
// from that node.
vector<Node *> LongestPath(Node *root)
{
    class StackEntry {
    public:
        StackEntry(Node *n, int depth)
        {
            n_ = n;
            depth_ = depth;
        }
        Node *n_;
        int depth_;
    };
    int path_size = 0;
    Node *path_leaf = NULL;
    stack<StackEntry> st;
    st.push(StackEntry(root, 1));
    while (!st.empty()) {
        // Copy the top entry before popping: the original code kept a
        // reference to st.top() across st.pop(), then dereferenced it —
        // a dangling reference to a destroyed element (undefined behavior).
        StackEntry e = st.top();
        st.pop();
        if (e.depth_ > path_size) {
            path_size = e.depth_;
            path_leaf = e.n_;
        }
        for (Node *child : e.n_->children_) {
            st.push(StackEntry(child, e.depth_ + 1));
        }
    }
    // Rebuild deepest-node-to-root, then flip to root-first order.
    vector<Node *> path;
    for (Node *n = path_leaf; n != NULL; n = n->parent_) {
        path.push_back(n);
    }
    reverse(path.begin(), path.end());
    return path;
}
// Builds the acquisition forest from (acquirer, acquired) pairs and returns
// the longest acquisition chain of company names over all trees.
vector<string> LongestChain(vector<pair<string, string>> const &mergers)
{
    unordered_map<string, Node *> id_to_node;
    // Fetches the node for an id, creating it on first sight.
    auto get_node = [&id_to_node](string const &id) {
        Node *&n = id_to_node[id];
        if (n == NULL) {
            n = new Node(id);
        }
        return n;
    };
    for (auto &m : mergers) {
        Node *acquirer = get_node(m.first);
        Node *acquired = get_node(m.second);
        acquired->parent_ = acquirer;
        acquirer->children_.push_back(acquired);
    }
    vector<string> longest_chain;
    for (auto &el : id_to_node) {
        if (el.second->parent_ != NULL) {
            continue; // only start from companies nobody acquired
        }
        auto chain = LongestPath(el.second);
        if (chain.size() > longest_chain.size()) {
            longest_chain.clear();
            for (Node *node : chain) {
                longest_chain.push_back(node->id_);
            }
        }
    }
    for (auto &el : id_to_node) {
        delete el.second;
    }
    return longest_chain;
}
int main()
{
auto out = LongestChain({
{"baidu", "ofo"},
{"mobike", "alibaba"},
{"company1", "company2"},
{"baidu", "company1"},
});
for (auto &c : out) {
cout << c << "->";
}
cout << "\n";
}
For each size of the patterns we can have a sliding window. To find a match I used the Rabin-Karp algorithm. Time is O(n * k), where n is the number of characters we've read from the stream, and k is the max size of the patterns.
#include <iostream>
#include <vector>
#include <list>
#include <unordered_map>
using namespace std;
// Fixed-size byte window over a stream with a rolling "hash": the window's
// bytes packed big-endian into a uint64_t. The packing is collision-free,
// but only windows of up to 8 bytes fit losslessly.
class SlidingWindow {
public:
    // Placeholder instance (e.g. map's default construction); size 0 is not
    // meant to receive CharIn calls.
    SlidingWindow()
    {
        size_ = 0;
        Clear();
    }
    SlidingWindow(int size)
    {
        size_ = size;
        Clear();
    }
    // Slides the window one byte: evicts the oldest byte once full, then
    // appends c.
    void CharIn(char c)
    {
        if (list_.size() == size_) {
            // Cast through unsigned char: a negative (signed) char would
            // sign-extend in the uint64_t cast and corrupt the subtraction.
            hash_ -= static_cast<uint64_t>(static_cast<unsigned char>(list_.front())) << ((size_ - 1) * 8);
            list_.pop_front();
        }
        // Same unsigned cast here: OR-ing a negative char would sign-extend
        // and set all the high bits of the hash.
        hash_ = (hash_ << 8) | static_cast<unsigned char>(c);
        list_.push_back(c);
    }
    // Packed value of a full window; sentinel max() while still filling.
    uint64_t Hash() const
    {
        return list_.size() == size_ ? hash_ : std::numeric_limits<uint64_t>::max();
    }
    // Empties the window; the configured size is kept.
    void Clear()
    {
        hash_ = 0;
        list_.clear();
    }
private:
    int size_;             // window length in bytes
    uint64_t hash_;        // big-endian packing of the current window
    std::list<char> list_; // window contents, oldest byte at the front
};
// Counts occurrences of each pattern in the stream (consuming it): one
// sliding window per distinct pattern length, matched by packed hash.
vector<pair<string, int>> Counts(list<char> &stream, vector<string> const &patterns)
{
    unordered_map<int, SlidingWindow> size_to_window;
    unordered_map<uint64_t, pair<string, int>> hash_to_pattern;
    // Register each pattern: compute its packed hash with the window of the
    // matching size, and start its count at zero.
    for (auto &p : patterns) {
        auto it = size_to_window.find(p.size());
        if (it == size_to_window.end()) {
            it = size_to_window.emplace(p.size(), SlidingWindow(p.size())).first;
        }
        SlidingWindow &w = it->second;
        for (char c : p) {
            w.CharIn(c);
        }
        hash_to_pattern[w.Hash()] = pair<string, int>(p, 0);
        w.Clear();
    }
    // Feed the stream through every window; a hash hit means a pattern match.
    while (!stream.empty()) {
        char c = stream.front();
        stream.pop_front();
        for (auto &el : size_to_window) {
            el.second.CharIn(c);
            auto hit = hash_to_pattern.find(el.second.Hash());
            if (hit != hash_to_pattern.end()) {
                ++hit->second.second;
            }
        }
    }
    vector<pair<string, int>> out;
    for (auto &el : hash_to_pattern) {
        out.push_back(el.second);
    }
    return out;
}
int main()
{
list<char> stream = {'A', 'B', 'C', 'A', 'S', 'G', 'K', 'T', 'A', 'B', 'C', 'A', 'S', 'K', 'H', 'T'};
vector<pair<string, int>> counts = Counts(stream, {"ABCAS", "ASGKT", "KHT"});
for (auto &count : counts) {
cout << count.first << "=>" << count.second << "\n";
}
}
Top down dynamic programming. O(n^2 * k) time and O(n^3 * k) space.
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
// A sequence of city indices together with the route's accumulated distance.
class Path {
public:
    // Fresh path: no cities and zero distance.
    Path() : distance_(0) {}
    std::vector<int> cities_; // city indices on the route
    int distance_;            // sum of edge distances along the route
};
// Shortest path visiting the cities in index order while skipping at most k
// of them; top-down DP over the state (k, idx, prev_idx), memoized. Cities
// are appended after the recursive call, so the list comes out reversed.
Path ShortestPath(int n, vector<vector<int>> const &distances, unordered_map<uint64_t, Path> &memo,
int k, int16_t idx = 0, int16_t prev_idx = -1)
{
    if (idx < 0 ||
        idx >= n)
    {
        return Path();
    }
    // Pack the state into one key. prev_idx must be masked to 16 bits: the
    // original OR-ed the raw int16_t, so prev_idx == -1 sign-extended to
    // 0xFFFFFFFF and smeared over the idx (and low k) bits, letting distinct
    // states collide in the memo.
    uint64_t memo_key = (static_cast<uint64_t>(k) << 32) |
                        (static_cast<uint64_t>(static_cast<uint16_t>(idx)) << 16) |
                        static_cast<uint16_t>(prev_idx);
    auto it = memo.find(memo_key);
    if (it != memo.end()) {
        return it->second;
    }
    // Option 1: include city idx on the route.
    Path path = ShortestPath(n, distances, memo, k, idx + 1, idx);
    path.cities_.push_back(idx);
    if (prev_idx != -1) {
        path.distance_ += distances[prev_idx][idx];
    }
    // Option 2: skip city idx, if skips remain.
    if (k > 0) {
        Path skip = ShortestPath(n, distances, memo, k - 1, idx + 1, prev_idx);
        if (skip.distance_ < path.distance_) {
            path = skip;
        }
    }
    memo[memo_key] = path;
    return path;
}
int main()
{
    int n = 5;
    int k = 2;
    vector<vector<int>> distances(n, vector<int>(n, numeric_limits<int>::max()));
    // (from, to, distance) triples for the upper-triangular distance matrix.
    int const edges[][3] = {
        {0, 1, 2}, {0, 2, 9}, {0, 3, 7}, {0, 4, 5},
        {1, 2, 7}, {1, 3, 1}, {1, 4, 5},
        {2, 3, 1}, {2, 4, 8},
        {3, 4, 1},
    };
    for (auto &e : edges) {
        distances[e[0]][e[1]] = e[2];
    }
    unordered_map<uint64_t, Path> memo;
    Path path = ShortestPath(n, distances, memo, k);
    cout << path.distance_ << ": ";
    // Cities were appended post-recursion, so print back-to-front.
    for (int i = path.cities_.size() - 1; i >= 0; --i) {
        cout << path.cities_[i] << "->";
    }
    cout << "\n";
}
Using topological sort.
#include <iostream>
#include <vector>
#include <unordered_map>
#include <unordered_set>
using namespace std;
// A schedulable job: an id plus the jobs that depend on this one finishing.
class Job {
public:
    Job(char id) : id_(id) {}
    // Registers `job` as depending on this job's completion.
    void AddDependentJob(Job *job)
    {
        children_.push_back(job);
    }
    char id_;
    std::vector<Job *> children_; // jobs unblocked when this job completes
};
class Dispatcher {
public:
Dispatcher(vector<Job *> const &jobs)
{
for (Job *j : jobs) {
for (Job *to_job : j->children_) {
++inbound_[to_job];
}
candidates_.insert(j);
}
}
vector<Job *> GetNextStageJobs()
{
for (Job *j : candidates_) {
if (inbound_[j] == 0) {
next_stage_jobs_.insert(j);
}
}
candidates_.clear();
return vector<Job *>(next_stage_jobs_.begin(), next_stage_jobs_.end());
}
void JobDone(Job *j)
{
next_stage_jobs_.erase(j);
for (Job *to_job : j->children_) {
--inbound_[to_job];
candidates_.insert(to_job);
}
}
private:
unordered_map<Job *, int> inbound_;
unordered_set<Job *> next_stage_jobs_, candidates_;
};
int main()
{
Job a('A'), b('B'), c('C'), d('D');
a.AddDependentJob(&b);
a.AddDependentJob(&c);
b.AddDependentJob(&d);
Dispatcher dis({&c, &b, &a, &d});
while (true) {
auto jobs = dis.GetNextStageJobs();
if (jobs.empty()) {
break;
}
for (Job *j : jobs) {
cout << "doing " << j->id_ << "\n";
dis.JobDone(j);
}
cout << "---\n";
}
}
// Rewrites a parenthesis string: '(' becomes '0', a matched ')' becomes '1',
// and every unmatched parenthesis (either kind) becomes "-1". Other
// characters are copied through unchanged.
std::string Transform(std::string const &s)
{
    std::string out;
    std::vector<size_t> open_positions; // out-indices of '(' not yet matched
    for (char c : s) {
        switch (c) {
        case '(':
            open_positions.push_back(out.size());
            out += '0'; // optimistic; patched to "-1" below if never matched
            break;
        case ')':
            if (!open_positions.empty()) {
                open_positions.pop_back();
                out += '1';
            } else {
                out += "-1"; // ')' with no '(' left to match
            }
            break;
        default:
            out += c;
            break;
        }
    }
    // Patch the leftover '(' markers in place, right to left so earlier
    // indices stay valid. (The original prefixed "-1"s and chopped the first
    // `open` characters off `out`, which mangled any input whose unmatched
    // '(' were not the leading characters, e.g. "()(" or "a(".)
    for (size_t i = open_positions.size(); i-- > 0; ) {
        out.replace(open_positions[i], 1, "-1");
    }
    return out;
}
@ChrisK. Yes, I see the queue size won't exceed the rate limit.
Regarding memory consumption our solutions differ. Size of my ring buffer is not rate limit. It's interval. E.g. if interval is 60 seconds, and the limit is 1 million, the buffer will be 60 integers.
@ChrisK. It may be worth to check if back() of the queue is too old and if so, to clear the queue (maybe by replacing it with another instance). E.g. there are millions requests within the interval, then, no requests within a time greater than the interval. In this case, currently, the first request that comes after the gap will be iterating the millions of requests sitting in the queue.
- Alex November 29, 2017
#include <iostream>
#include <vector>
using namespace std;
// Node of a nested integer list: either a leaf holding a value, or an
// internal node owning a list of child nodes (freed by the destructor).
class Node {
public:
    // Leaf node holding a single value.
    Node(int val)
    {
        leaf_ = true;
        val_ = val;
    }
    // Internal node; takes ownership of `children` and deletes them in ~Node.
    Node(std::vector<Node *> const &children)
    {
        leaf_ = false;
        val_ = 0;
        children_ = children;
    }
    ~Node()
    {
        for (Node *c : children_) {
            delete c;
        }
    }
    // The destructor deletes raw child pointers, so an implicit copy would
    // double-delete them; forbid copying (rule of five).
    Node(Node const &) = delete;
    Node &operator=(Node const &) = delete;
    bool leaf_;                    // true: use val_; false: use children_
    int val_;                      // leaf value (0 for internal nodes)
    std::vector<Node *> children_; // owned child nodes
};
// Prints all leaf values of the nested list in document order as "v, v, ".
// Note: only children are inspected, so a root that is itself a leaf
// prints nothing — matching how callers build the tree here.
void Print (Node *n)
{
    if (n == NULL) {
        return;
    }
    for (Node *child : n->children_) {
        if (child->leaf_) {
            cout << child->val_ << ", ";
        } else {
            Print(child);
        }
    }
}
int main() {
Node *root = new Node({
new Node(1),
new Node(5),
new Node(8),
new Node({
new Node(9),
new Node(10),
new Node(24),
new Node(20),
new Node({
new Node(39),
new Node(48)
}),
new Node(89)
}),
new Node(105),
new Node(99)
});
Print(root);
cout << "\n";
delete root;
}
Using a circular array and Decorator design pattern.
#include <iostream>
#include <vector>
#include <unistd.h>
using namespace std;
// Interface for anything that can handle a request (base of the decorator).
class Server {
public:
    // Virtual destructor: without it, deleting a derived object through a
    // Server* would be undefined behavior.
    virtual ~Server() = default;
    virtual void ProcessRequest() = 0;
};
class MyServer : public Server {
public:
virtual void ProcessRequest()
{
cout << "request processed\n";
}
};
// Decorator that rate-limits a wrapped Server: at most `limit` requests per
// sliding window of `interval` seconds. Memory is O(interval): one counter
// per second of the window (ring buffer) plus a running total.
class RateLimiter : public Server {
public:
// server: the wrapped server (not owned); interval: window length in
// seconds; limit: max requests allowed within any window.
RateLimiter(Server *server, int interval, int limit)
{
server_ = server;
limit_ = limit;
interval_ = interval;
counts_.resize(interval, 0);
start_time_ = 0;
rate_ = 0;
}
// Forwards the request to the wrapped server, or drops it when the
// current window is already at the limit.
virtual void ProcessRequest()
{
if (RequestAllowed()) {
server_->ProcessRequest();
} else {
cout << "request discarded\n";
}
}
private:
bool RequestAllowed()
{
uint32_t current_time = time(NULL);
// Clamp start_time_ so that after a long idle gap the loop below expires
// at most ~interval_ slots instead of iterating once per elapsed second.
start_time_ = max(start_time_, current_time - interval_ * 2);
// Expire ring-buffer slots that fell out of the current window
// (current_time - interval_, current_time].
while (start_time_ <= current_time - interval_) {
rate_ -= counts_[start_time_ % interval_];
counts_[start_time_ % interval_] = 0;
++start_time_;
}
if (rate_ >= limit_) {
return false;
} else {
// Record this request in the current second's slot.
++counts_[current_time % interval_];
++rate_;
return true;
}
}
int interval_, limit_, rate_; // window length; max per window; requests now in window
uint32_t start_time_;         // oldest second whose slot may still hold counts
vector<int> counts_;          // per-second request counts, indexed by time % interval_
Server *server_;              // decorated server (not owned)
};
int main() {
MyServer my_server;
RateLimiter rate_limiter(&my_server, 60, 10);
Server *server = &rate_limiter;
while (true) {
server->ProcessRequest();
sleep(1);
}
}
#include <iostream>
#include <vector>
#include <set>
#include <unordered_map>
using namespace std;
// Returns a uniformly random value from {0, ..., size-1} minus `excluded`,
// or -1 when no value remains. Assumes excluded values lie in [0, size).
int Random(int size, std::set<int> const &excluded)
{
    if (excluded.size() >= size) {
        return -1;
    }
    // Draw from the reduced range, then shift the draw up past every
    // excluded value at or below it; ascending set order keeps the
    // mapping monotonic, preserving uniformity.
    int rnd = rand() % (size - static_cast<int>(excluded.size()));
    for (int ex : excluded) {
        if (ex > rnd) {
            break;
        }
        ++rnd;
    }
    return rnd;
}
// Fills m[r][c] with a random value in [0, k) that does not complete a
// vertical or horizontal run of three equal values. A neighbor pair only
// constrains the cell when both of its cells are filled (!= -1) and equal.
void InitCell(vector<vector<int>> &m, int r, int c, int k)
{
    set<int> excluded;
    // (r1,c1) is adjacent to (r,c), (r2,c2) one step further in the same
    // direction; if in bounds, equal, and filled, their value is forbidden.
    auto exclude_pair = [&](int r1, int c1, int r2, int c2) {
        if (r2 >= 0 && r2 < static_cast<int>(m.size()) &&
            c2 >= 0 && c2 < static_cast<int>(m[r2].size()) &&
            m[r1][c1] == m[r2][c2] && m[r1][c1] != -1)
        {
            excluded.insert(m[r1][c1]);
        }
    };
    exclude_pair(r - 1, c, r - 2, c); // two cells above
    exclude_pair(r + 1, c, r + 2, c); // two cells below
    exclude_pair(r, c - 1, r, c - 2); // two cells to the left
    exclude_pair(r, c + 1, r, c + 2); // two cells to the right
    m[r][c] = Random(k, excluded);
}
// Builds an m x n board of values in [0, k) with no three equal values in a
// row or column, filling cells left-to-right, top-to-bottom.
vector<vector<int>> GenerateBoard(int m, int n, int k)
{
    vector<vector<int>> board(m, vector<int>(n, -1));
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
            InitCell(board, row, col, k);
        }
    }
    return board;
}
int main()
{
    srand(time(NULL));
    int const m = 8;
    int const n = 8;
    int const k = 5;
    // Per-cell histogram of generated values, to eyeball uniformity.
    vector<vector<unordered_map<int, int>>> stats(m, vector<unordered_map<int, int>>(n));
    for (int trial = 0; trial < 1000000; ++trial) {
        auto board = GenerateBoard(m, n, k);
        for (int r = 0; r < m; ++r) {
            for (int c = 0; c < n; ++c) {
                ++stats[r][c][board[r][c]];
            }
        }
    }
    for (int r = 0; r < m; ++r) {
        for (int c = 0; c < n; ++c) {
            cout << "[" << r << "][" << c << "]\n";
            for (auto &el : stats[r][c]) {
                cout << "\t" << el.first << "=>" << el.second << "\n";
            }
        }
    }
}
@Rob. That's correct. But to me it looks simpler and more efficient to use Dijkstra, limiting number of hops to 2. To make it correct, when we reach a node, we consider 2 criteria - cost, and number of hops. E.g. if we reach the node with 1 hop and $50 cost, and the node already has a label of 2 hops and $10, the new node also survives, because it contains something better in the criteria.
Alternatively, we can use bidirectional BFS to find the 3-hop subgraph (just marking its nodes as enabled), and in the subgraph, we can use a regular Dijkstra to find the actual cheapest path.
RepSWE
RepHire high professional child development center in Charlotte. Pal-A-Roo’s Child Development Center is a family-owned child care facility offering ...
Rep
RepIf you want to attract someone then astrologer kumar can help you. Astrologer kumar is specialized in offering kamdev vashikaran ...
@sri. E.g. the query is "g", "e". In the first trie we'll find 1 million of words beginning with "g", in the second trie we'll find 1 million words ending with "e". How would we merge the two sets efficiently?
- Alex December 13, 2017