Brute force.
#include <vector>
#include <iostream>
#include <limits>
using namespace std;
// True if m is a magic square: every row, column, and both diagonals sum to the same value.
inline bool Check(vector<vector<int>> const &m)
{
int size = m.size();
if (size > 0) {
int sum = numeric_limits<int>::min();
for (int i = 0; i < size; ++i) {
int s1 = 0;
int s2 = 0;
for (int j = 0; j < size; ++j) {
s1 += m[i][j];
s2 += m[j][i];
}
if (sum == numeric_limits<int>::min()) {
sum = s1;
}
if (s1 != sum ||
s2 != sum)
{
return false;
}
}
int s1 = 0;
int s2 = 0;
int r = 0;
int c = 0;
while (r < size &&
c < size)
{
s1 += m[r][c];
s2 += m[r++][size - 1 - c++];
}
if (s1 != sum ||
s2 != sum)
{
return false;
}
return true;
}
return false;
}
// Advances the matrix like an odometer over values 0..max_val; false once all combinations are exhausted.
inline bool Next(vector<vector<int>> &m, int max_val)
{
int r = 0;
int c = 0;
while (++m[r][c] > max_val) {
m[r][c] = 0;
if (++c >= m.size()) {
c = 0;
if (++r >= m.size()) {
return false;
}
}
}
return true;
}
void Print(vector<vector<int>> const &m)
{
for (auto row : m) {
for (int v : row) {
cout << v << ", ";
}
cout << "\n";
}
cout << "-----\n";
}
void ProduceMagicMatrixes(int size, int max_val)
{
vector<vector<int>> m;
for (int i = 0; i < size; ++i) {
m.push_back(vector<int>());
m.back().resize(size, 0);
}
do {
if (Check(m)) {
Print(m);
}
} while (Next(m, max_val));
}
int main()
{
ProduceMagicMatrixes(3, 10);
return 0;
}
// Counts the ways to split the digit string s into codes 1..alphabet_size (decode-ways).
int WaysCount(string const &s, int alphabet_size, int idx = 0)
{
if (idx < 0 ||
idx > s.size())
{
return 0;
}
if (idx == s.size()) {
return s.empty() ? 0 : 1;
}
if (s[idx] == '0') {
return 0;
}
int count = 0;
for (int i = idx; i < s.size(); ++i) {
if (stoi(s.substr(idx, i - idx + 1)) > alphabet_size) {
break;
}
count += WaysCount(s, alphabet_size, i + 1);
}
return count;
}
// Reverses the token order of the expression while keeping each number's digits intact.
void ReverseExpression(string &s)
{
reverse(s.begin(), s.end());
int start = -1;
for (int i = 0; i < s.size(); ++i) {
if (isdigit(s[i])) {
if (start == -1) {
start = i;
}
if (i + 1 == s.size() ||
!isdigit(s[i + 1]))
{
reverse(s.begin() + start, s.begin() + i + 1);
start = -1;
}
}
}
}
I've just realized that overwriting the input doesn't actually change the big-O.
The code below is O(mn) time (for each row we scan at most one full column), O(1) space, and the input is not destroyed.
bool LonelyInColumn(vector<vector<bool>> const &m, int c)
{
bool lonely = false;
for (int r = 0; r < m.size(); ++r) {
if (m[r][c]) {
lonely = !lonely;
if (!lonely) {
break;
}
}
}
return lonely;
}
int LonelyPixelsCount(vector<vector<bool>> const &m)
{
int count = 0;
if (!m.empty() &&
!m[0].empty())
{
for (int r = 0; r < m.size(); ++r) {
bool lonely = false;
for (int c = 0; c < m[0].size(); ++c) {
if (m[r][c]) {
lonely = !lonely && LonelyInColumn(m, c);
if (!lonely) {
break;
}
}
}
if (lonely) {
++count;
}
}
}
return count;
}
Time O(mn), space O(1), but the input gets destroyed.
First it checks whether the first row contains a lonely pixel. Then the first row is reused to store one flag per column; the flag indicates whether that column contains a lonely pixel.
bool LonelyInColumn(vector<vector<bool>> const &m, int c)
{
bool lonely = false;
for (int r = 0; r < m.size(); ++r) {
if (m[r][c]) {
lonely = !lonely;
if (!lonely) {
break;
}
}
}
return lonely;
}
int LonelyPixelsCount(vector<vector<bool>> &m)
{
int count = 0;
if (!m.empty() &&
!m[0].empty())
{
bool lonely = false;
for (int c = 0; c < m[0].size(); ++c) {
if (m[0][c]) {
lonely = !lonely && LonelyInColumn(m, c);
if (!lonely) {
break;
}
}
}
if (lonely) {
++count;
}
for (int c = 0; c < m[0].size(); ++c) {
m[0][c] = LonelyInColumn(m, c);
}
for (int r = 1; r < m.size(); ++r) {
bool lonely = false;
for (int c = 0; c < m[0].size(); ++c) {
if (m[r][c]) {
lonely = !lonely && m[0][c];
if (!lonely) {
break;
}
}
}
if (lonely) {
++count;
}
}
}
return count;
}
// Iterative DFS flood fill: consumes the connected 'X' shape containing (r, c) and returns its size.
int MaxShapeFromCell(vector<vector<char>> &m, int r, int c)
{
int size = 0;
stack<pair<int, int>> st;
st.push(pair<int, int>(r, c));
while (!st.empty()) {
auto cell = st.top();
st.pop();
r = cell.first;
c = cell.second;
if (r >= 0 &&
r < m.size() &&
c >= 0 &&
c < m[r].size() &&
m[r][c] == 'X')
{
++size;
m[r][c] = '.';
st.push(pair<int, int>(r + 1, c));
st.push(pair<int, int>(r - 1, c));
st.push(pair<int, int>(r, c + 1));
st.push(pair<int, int>(r, c - 1));
}
}
return size;
}
int MaxShape(vector<vector<char>> &m)
{
int max_size = 0;
for (int r = 0; r < m.size(); ++r) {
for (int c = 0; c < m[r].size(); ++c) {
max_size = max(max_size, MaxShapeFromCell(m, r, c));
}
}
return max_size;
}
It's quite easy to derive a quadratic-time algorithm (choose a center for the potential palindrome at each character and between each pair of adjacent characters, and try to grow a palindrome from that center; sketched below).
But there is a linear-time algorithm for this: Manacher's algorithm.
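A minimal sketch of the quadratic center-expansion idea (the naming is mine, assuming the task is to return the longest palindromic substring):
string LongestPalindrome(string const &s)
{
int best_start = 0;
int best_len = 0;
for (int center = 0; center < (int)s.size(); ++center) {
// gap 0 grows odd-length palindromes, gap 1 grows even-length ones
for (int gap = 0; gap <= 1; ++gap) {
int l = center;
int r = center + gap;
while (l >= 0 && r < (int)s.size() && s[l] == s[r]) {
--l;
++r;
}
// the loop overshoots by one on each side, so the palindrome is s[l + 1 .. r - 1]
if (r - l - 1 > best_len) {
best_len = r - l - 1;
best_start = l + 1;
}
}
}
return s.substr(best_start, best_len);
}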
// Kadane-style maximum subarray sum: the running prefix is dropped as soon as its sum goes negative.
int MaxSubarray(vector<int> const &a)
{
int max_sum = numeric_limits<int>::min();
int sum = 0;
int head_sum = 0;
for (int i = 0; i < a.size(); ++i) {
sum += a[i];
if (i > 0) {
head_sum += a[i - 1];
if (head_sum < 0) {
sum -= head_sum;
head_sum = 0;
}
}
max_sum = max(max_sum, sum);
}
return max_sum;
}
// Finds an element common to all of the sorted arrays by advancing per-array indexes; INT_MIN if there is none.
int CommonElement(vector<vector<int>> const &arrays)
{
int el = numeric_limits<int>::min();
vector<int> idxes;
idxes.resize(arrays.size(), 0);
for (int i = 0; i < arrays.size(); ++i) {
bool found = false;
bool greater = false;
while (idxes[i] < arrays[i].size()) {
int n = arrays[i][idxes[i]];
if (el == numeric_limits<int>::min() ||
n == el)
{
el = n;
found = true;
break;
}
if (n > el) {
el = n;
found = true;
greater = true;
break;
}
idxes[i]++;
}
if (!found) {
break;
}
if (greater) {
i = -1;
}
if (i == arrays.size() - 1) {
return el;
}
}
return numeric_limits<int>::min();
}
@ChrisK. If we run the transactions keeping track of the companies' balances, after all of the transactions the balances are the following:
Well Fargo: -485
BoA: 689
Chase: -204
Which, by hand, can be settled in two transactions:
Well Fargo pays BoA 485
Chase pays BoA 204
Your code produces 3 transactions:
from 2 to 1 $ 189
from 0 to 2 $ 674
from 0 to 1 $ 15
Probably a bug somewhere.
// Sliding window: maximum sum over all subarrays of exactly k elements.
int MaxSubarrayK(vector<int> const &a, int k) {
int max_sum = numeric_limits<int>::min();
if (k > 0 &&
k <= a.size())
{
int head = 0;
int sum = 0;
for (int i = 0; i < a.size(); ++i) {
sum += a[i];
int size = i - head + 1;
if (size >= k) {
if (size > k) {
sum -= a[head++];
}
max_sum = max(max_sum, sum);
}
}
}
return max_sum;
}
// Removes the minimum number of unmatched parentheses to balance the string.
string Balance(string const &s)
{
unordered_set<int> remove_idxes;
stack<int> st;
for (int i = 0; i < s.size(); ++i) {
if (s[i] == ')') {
if (st.empty()) {
remove_idxes.insert(i);
} else {
st.pop();
}
} else if (s[i] == '(') {
st.push(i);
}
}
while (!st.empty()) {
remove_idxes.insert(st.top());
st.pop();
}
string out;
for (int i = 0; i < s.size(); ++i) {
if (remove_idxes.find(i) == remove_idxes.end()) {
out += s[i];
}
}
return out;
}
class RingBuffer {
public:
RingBuffer(int size)
{
if (size > 0) {
// round the capacity up to a power of two so that modulo indexing stays
// consistent when the read/write counters wrap around
int pow = 0;
while (size != 0) {
++pow;
size >>= 1;
}
size = 1 << pow;
data_.resize(size);
}
w_ = r_ = 0;
}
int Write(vector<int> const &block)
{
int written_count = min(block.size(), data_.size() - (w_ - r_));
for (int i = 0; i < written_count; ++i) {
data_[w_++ % data_.size()] = block[i];
}
return written_count;
}
int Read(int count, vector<int> &out)
{
out.clear();
int read_count = count > 0 ? (int)min((unsigned)count, w_ - r_) : 0;
for (int i = 0; i < read_count; ++i) {
out.push_back(data_[r_++ % data_.size()]);
}
return read_count;
}
private:
unsigned w_, r_; // unsigned, so the wrap-around at 2^32 is well-defined
vector<int> data_;
};
The solution below is O(m) time (where m is the number of excluded numbers) and O(1) space; note that it assumes the excluded numbers are sorted in ascending order and distinct. It can be optimized to O(log m) time, keeping O(1) space, by using binary search to find how many excluded numbers fall at or below the generated random number (sketched after the code).
// Assumes ex is sorted ascending and duplicate-free; each excluded value at or
// below the candidate shifts the result up by one.
int Random(int n, vector<int> const &ex)
{
int rnd = -1;
if (n > ex.size()) {
rnd = rand() % (n - ex.size());
for (int i = 0; i < ex.size(); ++i) {
if (ex[i] <= rnd) {
++rnd;
}
}
}
return rnd;
}
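A sketch of the binary-search variant mentioned above (my own naming; like Random, it assumes ex is sorted ascending and duplicate-free):
int RandomLogM(int n, vector<int> const &ex)
{
if (n <= (int)ex.size()) {
return -1;
}
int rnd = rand() % (n - (int)ex.size());
// find how many excluded values the result has to skip over:
// ex[i] <= rnd + i means the first i + 1 excluded values all fall
// at or below the shifted result, so the shift must be at least i + 1
int l = 0;
int r = ex.size();
while (l < r) {
int mid = (l + r) / 2;
if (ex[mid] <= rnd + mid) {
l = mid + 1;
} else {
r = mid;
}
}
return rnd + l;
}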
The original question: BFS (a sketch follows the Dijkstra code below).
The follow-up: Dijkstra (the code below).
class Node {
public:
Node(int row, int col)
{
row_ = row;
col_ = col;
}
int row_, col_;
};
class Label {
public:
Label(int parent_idx, int idx, Node node, int cost) : node_(node.row_, node.col_)
{
parent_idx_ = parent_idx;
idx_ = idx;
cost_ = cost;
}
bool operator>(Label const &other) const
{
return cost_ > other.cost_;
}
int idx_, parent_idx_;
Node node_;
int cost_;
};
// Dijkstra over the grid; cells with value 0 are treated as walls.
vector<Node> ShortestPath(vector<vector<int>> const &m)
{
vector<Node> path;
if (!m.empty() &&
!m[0].empty())
{
priority_queue<Label, vector<Label>, greater<Label>> pq;
unordered_set<uint64_t> seen;
vector<Label> labels;
for (int col = 0; col < m[0].size(); ++col) {
labels.push_back(Label(-1, labels.size(), Node(0, col), m[0][col]));
pq.push(labels.back());
}
while (!pq.empty()) {
Label l = pq.top();
pq.pop();
Node n = l.node_;
uint64_t key = ((uint64_t)n.row_ << 32) | n.col_;
if (m[n.row_][n.col_] != 0 &&
seen.find(key) == seen.end())
{
seen.insert(key);
if (n.row_ == m.size() - 1) {
for (int i = l.idx_; i != -1; i = labels[i].parent_idx_) {
path.push_back(labels[i].node_);
}
reverse(path.begin(), path.end());
break;
}
if (n.row_ - 1 >= 0) {
labels.push_back(Label(l.idx_, labels.size(), Node(n.row_ - 1, n.col_), l.cost_ + m[n.row_ - 1][n.col_]));
pq.push(labels.back());
}
if (n.row_ + 1 < m.size()) {
labels.push_back(Label(l.idx_, labels.size(), Node(n.row_ + 1, n.col_), l.cost_ + m[n.row_ + 1][n.col_]));
pq.push(labels.back());
}
if (n.col_ - 1 >= 0) {
labels.push_back(Label(l.idx_, labels.size(), Node(n.row_, n.col_ - 1), l.cost_ + m[n.row_][n.col_ - 1]));
pq.push(labels.back());
}
if (n.col_ + 1 < m[0].size()) {
labels.push_back(Label(l.idx_, labels.size(), Node(n.row_, n.col_ + 1), l.cost_ + m[n.row_][n.col_ + 1]));
pq.push(labels.back());
}
}
}
}
return path;
}
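For the unweighted original question, a minimal BFS sketch over the same grid representation (my own naming; assumes <queue> is included; it returns only the number of steps for brevity, and treats 0 cells as walls, as above):
int ShortestPathBFS(vector<vector<int>> const &m)
{
if (m.empty() || m[0].empty()) {
return -1;
}
queue<pair<int, int>> q;
vector<vector<bool>> seen(m.size(), vector<bool>(m[0].size(), false));
for (int col = 0; col < (int)m[0].size(); ++col) {
if (m[0][col] != 0) {
q.push(pair<int, int>(0, col));
seen[0][col] = true;
}
}
int dr[] = {-1, 1, 0, 0};
int dc[] = {0, 0, -1, 1};
for (int steps = 0; !q.empty(); ++steps) {
// expand the frontier one level at a time so 'steps' counts moves
for (int level_size = q.size(); level_size > 0; --level_size) {
pair<int, int> cell = q.front();
q.pop();
if (cell.first == (int)m.size() - 1) {
return steps;
}
for (int d = 0; d < 4; ++d) {
int r = cell.first + dr[d];
int c = cell.second + dc[d];
if (r >= 0 && r < (int)m.size() && c >= 0 && c < (int)m[0].size() &&
m[r][c] != 0 && !seen[r][c])
{
seen[r][c] = true;
q.push(pair<int, int>(r, c));
}
}
}
}
return -1;
}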
My idea is to calculate the balances the companies have after running all of the transactions. Then, using max-heaps over the positive and the negative balances, generate the consolidated transactions.
I'm not sure whether the number of consolidated transactions can be made smaller. Maybe DP should be used.
#include <unordered_map>
#include <vector>
#include <string>
#include <iostream>
#include <queue>
using namespace std;
class Transaction {
public:
Transaction(string const &payee, int amount, string const &payer)
{
payee_ = payee;
amount_ = amount;
payer_ = payer;
}
string payee_, payer_;
int amount_;
};
class Company {
public:
Company(string name, int balance)
{
name_ = name;
balance_ = balance;
}
string name_;
int balance_;
bool operator<(Company const &other) const
{
return balance_ < other.balance_;
}
};
vector<Transaction> Consolidate(vector<Transaction> const &transactions)
{
unordered_map<string, int> balance;
for (auto t : transactions) {
balance[t.payer_] -= t.amount_;
balance[t.payee_] += t.amount_;
}
priority_queue<Company> pq1, pq2;
for (auto el : balance) {
if (el.second > 0) {
pq1.push(Company(el.first, el.second));
} else if (el.second < 0) {
pq2.push(Company(el.first, el.second * -1));
}
}
vector<Transaction> out;
while (!pq1.empty()) {
Company pos_balance_company = pq1.top();
pq1.pop();
Company neg_balance_company = pq2.top();
pq2.pop();
int amount = min(pos_balance_company.balance_, neg_balance_company.balance_);
pos_balance_company.balance_ -= amount;
neg_balance_company.balance_ -= amount;
// push back whichever side still has an outstanding balance
if (pos_balance_company.balance_ > 0) {
pq1.push(pos_balance_company);
}
if (neg_balance_company.balance_ > 0) {
pq2.push(neg_balance_company);
}
out.push_back(Transaction(pos_balance_company.name_, amount, neg_balance_company.name_));
}
return out;
}
int main(int argvc, char const **argv)
{
vector<Transaction> trans;
trans.push_back(Transaction("BoA", 132, "Chase"));
trans.push_back(Transaction("BoA", 827, "Chase"));
trans.push_back(Transaction("Well Fargo", 751, "BoA"));
trans.push_back(Transaction("BoA", 585, "Chase"));
trans.push_back(Transaction("Chase", 877, "Well Fargo"));
trans.push_back(Transaction("Well Fargo", 157, "Chase"));
trans.push_back(Transaction("Well Fargo", 904, "Chase"));
trans.push_back(Transaction("Chase", 976, "BoA"));
trans.push_back(Transaction("Chase", 548, "Well Fargo"));
trans.push_back(Transaction("BoA", 872, "Well Fargo"));
vector<Transaction> out = Consolidate(trans);
for (auto t : out) {
cout << t.payee_ << ", " << t.amount_ << ", " << t.payer_ << "\n";
}
return 0;
}
I assume that a manager can have a manager too, and that the hierarchy is acyclic (i.e. you can't be the manager of someone above you).
class Employee {
public:
Employee(int id)
{
id_ = id;
manager_ = NULL;
}
Employee *manager_;
unordered_set<Employee *> subordinates_;
int id_;
};
class Employment {
public:
void AssignManager(Employee *manager, Employee *subordinate)
{
if (manager &&
subordinate)
{
if (subordinate->manager_) {
subordinate->manager_->subordinates_.erase(subordinate);
}
subordinate->manager_ = manager;
subordinate->manager_->subordinates_.insert(subordinate);
}
}
void BeColleagues(Employee *e1, Employee *e2)
{
if (e1 &&
e2 &&
e1->manager_ != e2->manager_)
{
if (e1->manager_ == NULL) {
AssignManager(e2->manager_, e1);
} else if (e2->manager_ == NULL) {
AssignManager(e1->manager_, e2);
} else {
if (HierarchyLevel(e1) < HierarchyLevel(e2)) {
AssignManager(e1->manager_, e2);
} else {
AssignManager(e2->manager_, e1);
}
}
}
}
bool IsManager(Employee const *manager, Employee const *subordinate) const
{
if (manager &&
subordinate)
{
for (Employee const *upper = subordinate->manager_; upper != NULL; upper = upper->manager_) {
if (upper == manager) {
return true;
}
}
}
return false;
}
private:
int HierarchyLevel(Employee const *e) const
{
int level = 0;
if (e) {
for (Employee const *upper = e->manager_; upper != NULL; upper = upper->manager_) {
++level;
}
}
return level;
}
};
The code below is for the follow-up situation.
Another way to build the synonym groups is to build a graph out of the synonym words and extract the groups as connected components (using DFS or BFS), as sketched after the code.
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <string>
#include <iostream>
using namespace std;
void MergeGroups(unordered_map<int, unordered_set<string>> &groups,
unordered_map<string, int> &word_to_group_id, int g1, int g2)
{
if (groups[g1].size() > groups[g2].size()) {
swap(g1, g2);
}
for (auto w : groups[g1]) {
word_to_group_id[w] = g2;
groups[g2].insert(w);
}
groups.erase(g1);
}
unordered_map<string, int> BuildWordToGroupIdMap(vector<pair<string, string>> const &synonyms)
{
unordered_map<int, unordered_set<string>> groups;
unordered_map<string, int> word_to_group_id;
auto end = word_to_group_id.end();
int new_group_id = 0;
for (auto s : synonyms) {
string const &w1 = s.first;
string const &w2 = s.second;
auto it1 = word_to_group_id.find(w1);
auto it2 = word_to_group_id.find(w2);
if (it1 == end &&
it2 != end)
{
word_to_group_id[w1] = it2->second;
groups[it2->second].insert(w1);
} else if (it1 != end &&
it2 == end)
{
word_to_group_id[w2] = it1->second;
groups[it1->second].insert(w2);
} else if (it1 == end &&
it2 == end)
{
int group_id = new_group_id++;
groups[group_id].insert(w1);
groups[group_id].insert(w2);
word_to_group_id[w1] = group_id;
word_to_group_id[w2] = group_id;
} else if (it1->second != it2->second) {
// both words already belong to groups; merge only when the groups differ
MergeGroups(groups, word_to_group_id, it1->second, it2->second);
}
}
return word_to_group_id;
}
vector<string> ParseSentence(string const &s)
{
vector<string> words;
int i = 0;
while (i < s.size()) {
while (i < s.size() &&
s[i] == ' ')
{
++i;
}
if (i < s.size()) {
int j = i;
while (j + 1 < s.size() && s[j + 1] != ' ') {
++j;
}
words.push_back(s.substr(i, j - i + 1));
i = j + 1;
}
}
return words;
}
bool SynonymSentences(string const &s1, string const &s2,
unordered_map<string, int> const &word_to_group_id)
{
vector<string> words1 = ParseSentence(s1);
vector<string> words2 = ParseSentence(s2);
if (words1.size() != words2.size()) {
return false;
}
for (int i = 0; i < words1.size(); ++i) {
auto it1 = word_to_group_id.find(words1[i]);
auto it2 = word_to_group_id.find(words2[i]);
if (it1 == word_to_group_id.end() ||
it2 == word_to_group_id.end() ||
it1->second != it2->second)
{
return false;
}
}
return true;
}
int main(int argvc, char const **argv)
{
vector<pair<string, string>> synonyms = {{"fast", "quick"}, {"fast", "speedy"}, {"learn", "study"}};
unordered_map<string, int> word_to_group_id = BuildWordToGroupIdMap(synonyms);
cout << SynonymSentences("learn quick", "study speedy", word_to_group_id) << "\n";
return 0;
}
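A sketch of the connected-components alternative mentioned above (my own naming; iterative DFS over an adjacency list, using the same headers as the program above):
unordered_map<string, int> BuildWordToGroupIdMapGraph(vector<pair<string, string>> const &synonyms)
{
unordered_map<string, vector<string>> adj;
for (auto const &s : synonyms) {
adj[s.first].push_back(s.second);
adj[s.second].push_back(s.first);
}
unordered_map<string, int> word_to_group_id;
int group_id = 0;
for (auto const &el : adj) {
if (word_to_group_id.find(el.first) != word_to_group_id.end()) {
continue;
}
// DFS from an unvisited word; everything reachable gets the same group id
vector<string> st(1, el.first);
word_to_group_id[el.first] = group_id;
while (!st.empty()) {
string w = st.back();
st.pop_back();
for (auto const &next : adj[w]) {
if (word_to_group_id.find(next) == word_to_group_id.end()) {
word_to_group_id[next] = group_id;
st.push_back(next);
}
}
}
++group_id;
}
return word_to_group_id;
}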
// Finds the start of the block_size-long window of sorted array a that contains n
// and whose values are closest to n; -1 if n is absent.
int FindBlock(vector<int> const &a, int n, int block_size)
{
int result = -1;
if (block_size > 0 &&
a.size() >= block_size)
{
int n_idx = -1;
int l = 0;
int r = a.size() - 1;
while (l <= r &&
n_idx == -1)
{
int mid = (l + r) / 2;
if (a[mid] == n) {
n_idx = mid;
} else if (a[mid] > n) {
r = (r == mid) ? r - 1 : mid;
} else {
l = (l == mid) ? l + 1 : mid;
}
}
if (n_idx != -1) {
int l = n_idx;
int r = n_idx;
while (r - l + 1 < block_size) {
if (l == 0) {
++r;
} else if (r == a.size() - 1) {
--l;
} else {
if (n - a[l - 1] < a[r + 1] - n) {
--l;
} else {
++r;
}
}
}
result = l;
}
}
return result;
}
#include <vector>
#include <iostream>
using namespace std;
// Intended to emit the index pairs of the k smallest sums a1[i] + a2[j]; assumes both arrays are sorted ascending.
vector<pair<int, int>> MinPairs(vector<int> const &a1, vector<int> const &a2, int k)
{
vector<pair<int, int>> out;
if (k > 0 &&
a1.size() * a2.size() >= k)
{
int start1 = 0;
int start2 = 0;
int i = 0;
int j = 0;
while (out.size() < k) {
int idx1 = -1;
int idx2 = -1;
if (start1 + 1 < a1.size() &&
a1[start1 + 1] + a2[0] < a2[start2] + a1[i] &&
a1[start1 + 1] + a2[0] < a1[start1] + a2[j])
{
++start1;
idx1 = start1;
idx2 = 0;
j = 0;
} else if (start2 + 1 < a2.size() &&
a2[start2 + 1] + a1[0] < a1[start1] + a2[j] &&
a2[start2 + 1] + a1[0] < a2[start2] + a1[i])
{
++start2;
idx1 = 0;
idx2 = start2;
i = 0;
} else if (a1[start1] + a2[j] < a2[start2] + a1[i]) {
idx1 = start1;
idx2 = j++;
if (j >= a2.size()) {
j = 0;
++start1;
}
} else {
idx1 = i++;
idx2 = start2;
if (i >= a1.size()) {
i = 0;
++start2;
}
}
if (out.empty() ||
out.back().first != idx1 ||
out.back().second != idx2)
{
out.push_back(pair<int, int>(idx1, idx2));
}
}
}
return out;
}
int main(int argvc, char const **argv)
{
vector<int> a1 = {1, 2, 3, 6, 10};
vector<int> a2 = {1, 4, 5, 7};
vector<pair<int, int>> out = MinPairs(a1, a2, 5);
for (auto el : out) {
cout << a1[el.first] << ", " << a2[el.second] << " => " << (a1[el.first] + a2[el.second]) << "\n";
}
return 0;
}
@ChrisK. Yes, it may not be uniform in the sense of what the interviewer was looking for (more details in the task description would be helpful). My idea is: at the start I have N nodes, and among them I choose the root of the tree uniformly, i.e. any node out of the N nodes can become the root with equal probability. Then I do the same thing for the left and right subtrees.
- Alex August 31, 2017
Assuming we need to build a tree that contains N nodes (N can also be random), has a random structure, and random values.
Node *Build(int size)
{
Node *n = NULL;
if (size > 0) {
n = new Node(rand());
--size;
int left_size = rand() % (size + 1);
n->left_ = Build(left_size);
n->right_ = Build(size - left_size);
}
return n;
}
My idea is to find the max distance, which is the sum of the hop distances between adjacent stations in the correct station order. Using dynamic programming, I generate combinations of hops and check whether a combination satisfies the rest of the distances. To optimize, memoization can be used (not implemented in the code below).
#include <unordered_map>
#include <vector>
#include <iostream>
using namespace std;
bool CheckComb(unordered_map<int, int> d, vector<int> const &comb)
{
if (comb.size() < 2) {
return false;
}
for (int i = 0; i < comb.size(); ++i) {
int sum = 0;
for (int j = i; j < comb.size(); ++j) {
sum += comb[j];
if (j > i) {
if (d.find(sum) == d.end()) {
return false;
}
if (--d[sum] == 0) {
d.erase(sum);
}
}
}
}
return d.empty();
}
void Combine(unordered_map<int, int> &d, int total, vector<int> &comb, vector<vector<int>> &out)
{
if (total < 0) {
return;
}
if (total == 0) {
if (CheckComb(d, comb)) {
out.push_back(comb);
}
return;
}
vector<int> keys;
for (auto el : d) {
keys.push_back(el.first);
}
for (int k : keys) {
if (--d[k] == 0) {
d.erase(k);
}
comb.push_back(k);
Combine(d, total - k, comb, out);
comb.pop_back();
++d[k];
}
}
vector<vector<int>> StationPositions(vector<int> const &d)
{
if (d.empty()) {
return vector<vector<int>>();
}
int max_idx = 0;
for (int i = 1; i < d.size(); ++i) {
if (d[i] > d[max_idx]) {
max_idx = i;
}
}
int total = d[max_idx];
unordered_map<int, int> d_map;
for (int i = 0; i < d.size(); ++i) {
++d_map[d[i]];
}
vector<int> comb;
vector<vector<int>> out;
Combine(d_map, total, comb, out);
return out;
}
int main(int argvc, char const **argv)
{
vector<int> a = {10, 20, 70, 80, 30, 20, 100, 70, 50, 90};
vector<vector<int>> out = StationPositions(a);
for (auto comb : out) {
int pos = 0;
cout << pos << ", ";
for (int d : comb) {
cout << (pos += d) << ", ";
}
cout << "\n";
}
return 0;
}
class Cylinder {
public:
Cylinder(int height, int id)
{
h_ = height;
id_ = id;
}
int h_, id_;
};
// Moves the minimum at index 'to' to index 'from' by reversing a[from..to].
// Runs of equal heights are pre-reversed so that the big reversal preserves
// their original relative order (keeps the sort stable).
void Reverse(vector<Cylinder> &a, int from, int to)
{
if (to > from) {
for (int i = from; i < to; ++i) {
if (a[i].h_ == a[i + 1].h_) {
int dups_end_idx = i + 1;
while (dups_end_idx + 1 <= to &&
a[dups_end_idx + 1].h_ == a[i].h_)
{
++dups_end_idx;
}
reverse(a.begin() + i, a.begin() + dups_end_idx + 1);
i = dups_end_idx;
}
}
reverse(a.begin() + from, a.begin() + to + 1);
}
}
void RoboSort(vector<Cylinder> &a)
{
for (int i = 0; i < a.size(); ++i) {
int min_idx = i;
for (int j = i + 1; j < a.size(); ++j) {
if (a[j].h_ < a[min_idx].h_) {
min_idx = j;
}
}
Reverse(a, i, min_idx);
}
}
class Lamps {
public:
Lamps(int size)
{
size_ = size;
block_size_ = sizeof(map_[0]) * 8;
map_.resize(size / block_size_ + (size % block_size_ != 0 ? 1 : 0));
}
void Toggle(int from, int to)
{
if (from >= 0 &&
from < size_ &&
to >= 0 &&
to < size_ &&
from <= to)
{
int start_block_idx = from / block_size_;
int end_block_idx = to / block_size_;
int head_size = from % block_size_;
int tail_size = (block_size_ - ((to + 1) % block_size_)) % block_size_; // the final % keeps the shift below 64 when the range ends on a block boundary
uint64_t head_mask = ~((uint64_t)-1 >> head_size);
uint64_t tail_mask = ~((uint64_t)-1 << tail_size);
uint64_t head_stored = map_[start_block_idx];
uint64_t tail_stored = map_[end_block_idx];
for (int i = start_block_idx; i <= end_block_idx; ++i) {
map_[i] = ~map_[i];
}
map_[start_block_idx] &= ~head_mask;
map_[start_block_idx] |= head_stored & head_mask;
map_[end_block_idx] &= ~tail_mask;
map_[end_block_idx] |= tail_stored & tail_mask;
}
}
bool IsOn(int idx)
{
bool on = false;
if (idx >= 0 &&
idx < size_)
{
on = (map_[idx / block_size_] >> (block_size_ - 1 - (idx % block_size_))) & 0x01;
}
return on;
}
private:
int size_;
int block_size_;
vector<uint64_t> map_;
};
Works for positive and negative numbers.
pair<double, double> Extremes(double v1, double v2)
{
double min_val = min(v1 + v2, v1 - v2);
min_val = min(min_val, v1 * v2);
double max_val = max(v1 + v2, v1 - v2);
max_val = max(max_val, v1 * v2);
return pair<double, double>(min_val, max_val);
}
// Min and max values obtainable from a[idx..idx+size) by splitting it anywhere
// and combining the two halves with +, -, or * (interval DP with memoization).
pair<double, double> MinMax(vector<double> const &a, int idx, int size,
map<pair<int, int>, pair<double, double>> &memo)
{
if (size == 0 ||
idx < 0 ||
idx >= a.size())
{
return pair<double, double>(0, 0);
}
if (size == 1) {
return pair<double, double>(a[idx], a[idx]);
}
pair<int, int> key = pair<int, int>(idx, size);
auto it = memo.find(key);
if (it != memo.end()) {
return it->second;
}
double min_v = numeric_limits<double>::max();
double max_v = numeric_limits<double>::lowest();
for (int i = 1; i < size; ++i) {
pair<double, double> left = MinMax(a, idx, i, memo);
pair<double, double> right = MinMax(a, idx + i, size - i, memo);
pair<double, double> p1 = Extremes(left.first, right.first);
pair<double, double> p2 = Extremes(left.first, right.second);
pair<double, double> p3 = Extremes(left.second, right.first);
pair<double, double> p4 = Extremes(left.second, right.second);
min_v = min(min_v, p1.first);
min_v = min(min_v, p2.first);
min_v = min(min_v, p3.first);
min_v = min(min_v, p4.first);
max_v = max(max_v, p1.second);
max_v = max(max_v, p2.second);
max_v = max(max_v, p3.second);
max_v = max(max_v, p4.second);
}
memo[key] = pair<double, double>(min_v, max_v);
return memo[key];
}
// Prints every path from 0 to n where each jump is a power of two; comb holds the intermediate positions.
void PrintPaths(int n, vector<int> &comb, int sum = 0)
{
if (n == 0) {
cout << 0 << (comb.empty() ? "" : ", ");
for (int v : comb) {
cout << v << ", ";
}
cout << "\n";
return;
}
if (n < 0) {
return;
}
for (int v = 1; n - v >= 0; v <<= 1) {
comb.push_back(sum + v);
PrintPaths(n - v, comb, sum + v);
comb.pop_back();
}
}
class EmptyRange {
public:
EmptyRange(int from_idx, int to_idx) : from_idx_(from_idx), to_idx_(to_idx)
{
}
// longer ranges win; among equal lengths, the leftmost range wins
bool operator<(EmptyRange const &other) const
{
int len = to_idx_ - from_idx_;
int len_other = other.to_idx_ - other.from_idx_;
if (len == len_other) {
return from_idx_ > other.from_idx_;
}
return len < len_other;
}
int from_idx_, to_idx_;
};
class Row {
public:
Row(int size)
{
if (size < 2) {
cerr << "invalid size\n";
exit(-1);
}
seats_.resize(size, -1);
pq_.push(EmptyRange(0, seats_.size() - 1));
}
int AddStudent(int id)
{
if (id < 0 ||
students_.size() >= seats_.size() ||
students_.find(id) != students_.end())
{
return -1;
}
EmptyRange r = pq_.top();
pq_.pop();
int idx = -1;
if (r.from_idx_ == 0) {
idx = 0;
pq_.push(EmptyRange(1, r.to_idx_));
} else if (r.to_idx_ == seats_.size() - 1) {
idx = seats_.size() - 1;
pq_.push(EmptyRange(r.from_idx_, seats_.size() - 2));
} else {
idx = r.from_idx_ + (r.to_idx_ - r.from_idx_) / 2;
if (idx > r.from_idx_) {
pq_.push(EmptyRange(r.from_idx_, idx - 1));
}
if (idx < r.to_idx_) {
pq_.push(EmptyRange(idx + 1, r.to_idx_));
}
}
seats_[idx] = id;
students_[id] = idx;
return idx;
}
void RemoveStudent(int id)
{
auto it = students_.find(id);
if (it != students_.end()) {
int idx = it->second;
seats_[idx] = -1;
students_.erase(id);
RebuildPQ();
}
}
private:
void RebuildPQ()
{
// std::priority_queue has no clear(), so drain it
while (!pq_.empty()) {
pq_.pop();
}
int from_idx = -1;
for (int i = 0; i < seats_.size(); ++i) {
if (seats_[i] == -1) {
if (from_idx == -1) {
from_idx = i;
}
} else if (from_idx != -1) {
pq_.push(EmptyRange(from_idx, i - 1));
from_idx = -1;
}
}
// flush a trailing run of empty seats, including one that is just the last seat
if (from_idx != -1) {
pq_.push(EmptyRange(from_idx, (int)seats_.size() - 1));
}
}
vector<int> seats_;
unordered_map<int, int> students_;
priority_queue<EmptyRange> pq_;
};
If the interviewer didn't specify that all values are within 1..n, I'd use the max-heap/min-heap solution (sketched after the code below), which is O(N log N) time for adding N numbers and O(1) for getting the median.
Since they mentioned 1..n, they probably want something like the code below, with O(N) time for adding N numbers and O(n) (one scan over the value range) for getting the median.
class Median {
public:
void AddNumber(uint32_t n)
{
if (n >= data_.size()) {
data_.resize(max(n + 1, (uint32_t)(data_.size() * 2)));
}
++data_[n];
++size_;
}
double Get()
{
double median = 0;
if (size_ > 0) {
int idx = (size_ - 1) / 2;
uint32_t v1 = 0;
uint32_t v2 = 0;
GetVals(v1, v2, idx);
median = size_ % 2 == 0 ? (v1 + v2) / 2.0 : v1;
}
return median;
}
private:
void GetVals(uint32_t &v1, uint32_t &v2, int idx)
{
v1 = v2 = 0;
int count = 0;
for (uint32_t i = 0; i < data_.size(); ++i) {
count += data_[i];
if (count > idx) {
v1 = i;
if (count > idx + 1) {
v2 = i;
} else {
// the second middle value is the next value actually present in the data
for (uint32_t j = i + 1; j < data_.size(); ++j) {
if (data_[j] > 0) {
v2 = j;
break;
}
}
}
return;
}
}
}
vector<int> data_;
int size_ = 0;
};
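For comparison, a sketch of the two-heap approach mentioned above (my own naming; assumes <queue> is included; a max-heap holds the lower half and a min-heap the upper half):
class HeapMedian {
public:
void AddNumber(int n)
{
// route the number through the lower half so both heaps stay ordered
lower_.push(n);
upper_.push(lower_.top());
lower_.pop();
// keep the lower half at least as large as the upper half
if (upper_.size() > lower_.size()) {
lower_.push(upper_.top());
upper_.pop();
}
}
double Get() const
{
if (lower_.empty()) {
return 0;
}
if (lower_.size() == upper_.size()) {
return (lower_.top() + upper_.top()) / 2.0;
}
return lower_.top();
}
private:
priority_queue<int> lower_;
priority_queue<int, vector<int>, greater<int>> upper_;
};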
#include <string>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <iostream>
#include <dirent.h>
#include <vector>
#include <sys/types.h>
#include <unistd.h>
using namespace std;
vector<string> buf;
uint64_t produced_count = 0;
uint64_t consumed_count = 0;
condition_variable counts_cv;
mutex counts_mutex;
vector<thread> threads;
int idle_threads_count = 0;
bool stop = false;
void ProduceFile(string const &item)
{
unique_lock<mutex> lock(counts_mutex);
while (produced_count - consumed_count == buf.size()) {
counts_cv.wait(lock);
}
buf[produced_count++ % buf.size()] = item;
counts_cv.notify_all();
cerr << "produced: " << item << "\n";
}
void ConsumeFiles()
{
while (true) {
unique_lock<mutex> lock(counts_mutex);
while (produced_count == consumed_count) {
++idle_threads_count;
counts_cv.notify_all();
counts_cv.wait(lock);
--idle_threads_count;
if (stop) {
return;
}
}
string item = buf[consumed_count++ % buf.size()];
counts_cv.notify_all();
lock.unlock();
cerr << "removing file: " << this_thread::get_id() << ": " << item << "\n";
}
}
void RemoveEmptyDir(string const &dir)
{
unique_lock<mutex> lock(counts_mutex);
while (idle_threads_count != threads.size()) {
counts_cv.wait(lock);
}
cerr << "removing dir: " << dir << "\n";
}
void Rm(string const &dir_name)
{
DIR *dir, *dir2;
struct dirent *ent;
if ((dir = opendir(dir_name.c_str())) != NULL) {
while ((ent = readdir(dir)) != NULL) {
string item_name(ent->d_name);
string item(dir_name + '/' + item_name);
if (item_name != "." &&
item_name != "..")
{
if ((dir2 = opendir(item.c_str())) != NULL) {
closedir(dir2);
Rm(item);
} else {
ProduceFile(item);
}
}
}
closedir(dir);
RemoveEmptyDir(dir_name);
}
}
void RemoveDir(string const &dir_name)
{
buf.resize(16); // the counters wrap at 2^64, so buf.size() must divide 2^64 (i.e. be a power of two)
for (int i = 0; i < 8; ++i) {
threads.push_back(thread(ConsumeFiles));
}
Rm(dir_name);
unique_lock<mutex> lock(counts_mutex);
stop = true;
lock.unlock();
counts_cv.notify_all();
for (int i = 0; i < threads.size(); ++i) {
threads[i].join();
}
}
int main(int argvc, char const **argv)
{
RemoveDir("./tmp");
return 0;
}
// Stores numbers and answers whether any two of them add up to a given sum.
class TwoSum {
public:
void Store(int n)
{
++map_[n];
}
bool Test(int sum)
{
for (auto el : map_) {
int remainder = sum - el.first;
auto it = map_.find(remainder);
if (it != map_.end()) {
if (remainder != el.first ||
it->second > 1)
{
return true;
}
}
}
return false;
}
private:
unordered_map<int, int> map_;
};
vector<int> GetUniques(vector<int> const &a)
{
unordered_map<int, int> counts;
for (int n : a) {
++counts[n];
}
vector<int> out;
for (auto const &count : counts) {
if (count.second == 1) {
out.push_back(count.first);
}
}
return out;
}
// Collects the values that appear exactly once in the sorted array.
vector<int> GetUniquesSortedInput(vector<int> const &a)
{
vector<int> out;
int size = a.size();
for (int i = 0; i < size; ) {
int run_start = i;
while (i + 1 < size &&
a[i + 1] == a[i])
{
++i;
}
++i;
if (i - run_start == 1) {
out.push_back(a[run_start]);
}
}
return out;
}
// Q1: LCA in a BST with unique values: walk down until val1 and val2 split.
Node const *LowestAncestorQ1(Node const *n, int val1, int val2)
{
while (n) {
if (val1 > n->val_ &&
val2 > n->val_)
{
n = n->right_;
} else if (
val1 < n->val_ &&
val2 < n->val_)
{
n = n->left_;
} else {
break;
}
}
return n;
}
// Q2: same BST walk, but with duplicates: keep descending while the left child holds the same value.
Node const *LowestAncestorQ2(Node const *n, int val1, int val2)
{
while (n) {
if (val1 > n->val_ &&
val2 > n->val_)
{
n = n->right_;
} else if (
val1 < n->val_ &&
val2 < n->val_)
{
n = n->left_;
} else {
if (n->left_ &&
n->left_->val_ == n->val_)
{
n = n->left_;
} else {
break;
}
}
}
return n;
}
// Q3: general binary tree: count matches in each subtree and record the node where the count first reaches 2.
int LowestAncestorQ3(Node const *n, int val1, int val2, Node const **ancestor)
{
int count = 0;
if (!*ancestor &&
n)
{
if (n->val_ == val1 ||
n->val_ == val2)
{
++count;
}
count += LowestAncestorQ3(n->left_, val1, val2, ancestor);
count += LowestAncestorQ3(n->right_, val1, val2, ancestor);
if (count >= 2 &&
!*ancestor)
{
*ancestor = n;
}
}
return count;
}
Time complexity: O(N)
// Maximum product of any three elements, tracking the extreme values and pair products seen so far.
int MaxProd3(vector<int> const &a)
{
int max_prod3 = 0;
if (a.size() >= 3) {
int max_val = max(a[0], a[1]);
int max_prod2 = a[0] * a[1];
int min_val = min(a[0], a[1]);
int min_prod2 = a[0] * a[1];
max_prod3 = a[0] * a[1] * a[2];
for (int i = 2; i < a.size(); ++i) {
max_prod3 = max(max_prod3, a[i] * max_prod2);
max_prod3 = max(max_prod3, a[i] * min_prod2);
max_prod2 = max(max_prod2, max_val * a[i]);
max_prod2 = max(max_prod2, min_val * a[i]);
min_prod2 = min(min_prod2, max_val * a[i]);
min_prod2 = min(min_prod2, min_val * a[i]);
max_val = max(max_val, a[i]);
min_val = min(min_val, a[i]);
}
}
return max_prod3;
}
#include <thread>
#include <condition_variable>
#include <mutex>
#include <iostream>
#include <vector>
#include <unistd.h>
using namespace std;
vector<int> buffer;
uint64_t produced_count = 0;
uint64_t consumed_count = 0;
mutex counts_mutex;
condition_variable counts_cv;
void producer()
{
while (true) {
unique_lock<mutex> counts_lock(counts_mutex);
while (produced_count - consumed_count == buffer.size()) {
counts_cv.wait(counts_lock);
}
int product = rand();
buffer[produced_count++ % buffer.size()] = product;
cout << "produced: " << product << "\n";
counts_cv.notify_all();
}
}
void consumer()
{
while (true) {
unique_lock<mutex> counts_lock(counts_mutex);
while (produced_count - consumed_count == 0) {
counts_cv.wait(counts_lock);
}
int product = buffer[consumed_count++ % buffer.size()];
cout << "consumed: " << product << "\n";
counts_cv.notify_all();
}
}
int main(int argvc, char const **argv)
{
buffer.resize(16, 0);
// to handle the uint64_t counts overflow, the following must be true:
// (0xffffffffffffffff + 1) % buffer.size() == 0
vector<thread> threads;
for (int i = 0; i < 3; ++i) {
threads.push_back(thread(producer));
}
sleep(3);
for (int i = 0; i < 3; ++i) {
threads.push_back(thread(consumer));
}
for (int i = 0; i < threads.size(); ++i) {
threads[i].join();
}
return 0;
}
// Collects everyone below the employee with the given id; 'found' is true while
// we are inside that employee's subtree. Returns true once the target's whole
// subtree has been processed, so callers can stop early (ids assumed unique).
bool GetReporters(Employee *e, int id, vector<Employee *> &reporters, bool found = false)
{
if (!e) {
return false;
}
if (found) {
reporters.push_back(e);
}
found = found || e->id_ == id;
for (auto reporter : e->reporters_) {
if (GetReporters(reporter, id, reporters, found)) {
return true;
}
}
return e->id_ == id;
}
// Counts occurrences of n1 and n2 in the subtree of n; *ancestor is set at the node where both have been seen.
int CommonAncestor(Node const *n, Node const *n1, Node const *n2, Node const **ancestor)
{
int count = 0;
if (n &&
n1 &&
n2 &&
n1 != n2 &&
*ancestor == NULL)
{
if (n == n1 ||
n == n2)
{
++count;
}
count += CommonAncestor(n->left_, n1, n2, ancestor);
if (count < 2) {
count += CommonAncestor(n->right_, n1, n2, ancestor);
}
if (count == 2) {
*ancestor = n;
count = 0;
}
}
return count;
}
// Prints the top-left-to-bottom-right diagonals, starting from the bottom-left corner.
void PrintDiagonals(vector<vector<int>> const &matrix)
{
if (!matrix.empty() &&
!matrix[0].empty())
{
int m = matrix.size();
int n = matrix[0].size();
int start_r = m - 1;
int start_c = 0;
while (start_c < n) {
int r = start_r;
int c = start_c;
while (r < m &&
c < n)
{
cout << matrix[r][c] << ", ";
++r;
++c;
}
cout << "\n";
if (start_r > 0) {
--start_r;
} else {
++start_c;
}
}
}
}
class Node {
public:
vector<int> vals_;
vector<Node> children_;
};
// Weights each value by its reversed depth (its height above the deepest leaf)
// and accumulates it into sum; returns the height of the subtree.
int ReversedDepthWeightedSum(Node const *node, int &sum)
{
int depth = 0;
if (node) {
for (Node const &child : node->children_) {
depth = max(depth, ReversedDepthWeightedSum(&child, sum));
}
}
++depth;
for (int val : node->vals_) {
sum += val * depth;
}
}
return depth;
}
// Weighted random pick in one pass: element i wins with probability freq[i] / (freq[0] + ... + freq[i]).
int GetRandom(vector<int> const &a, vector<int> const &freq)
{
int idx = -1;
if (a.size() == freq.size()) {
int sum = 0;
for (int i = 0; i < freq.size(); ++i) {
if (rand() % (sum + freq[i]) >= sum) {
idx = i;
}
sum += freq[i];
}
}
return idx == -1 ? numeric_limits<int>::min() : a[idx];
}
Top-down DP. Time and space: O(number_of_coins * amount * max(count_of_coins)).
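The problem statement isn't included here; assuming it's the bounded coin-change count (each coin i usable at most count[i] times), a minimal memoized sketch consistent with that complexity (all naming is my own guess; assumes <map> and <vector> are included):
int Ways(vector<int> const &coins, vector<int> const &count, int amount,
int idx, map<pair<int, int>, int> &memo)
{
if (amount == 0) {
return 1;
}
if (amount < 0 || idx >= (int)coins.size()) {
return 0;
}
pair<int, int> key(idx, amount);
auto it = memo.find(key);
if (it != memo.end()) {
return it->second;
}
int ways = 0;
// try every allowed multiplicity of the current coin: O(max count) work per state
for (int c = 0; c <= count[idx] && c * coins[idx] <= amount; ++c) {
ways += Ways(coins, count, amount - c * coins[idx], idx + 1, memo);
}
memo[key] = ways;
return ways;
}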
- Alex September 27, 2017