Alex
We can first find the nodes which have the maximum sum of heights of the left and right subtrees. After that, for each such node (top_node below), we find all longest paths in the left subtree and in the right subtree. Then we combine the paths to find all paths having the maximum length.
#include <iostream>
#include <vector>
using namespace std;
// A binary-tree node holding an integer payload and two child links.
class Node {
public:
    // Constructs a leaf: both children start out empty.
    Node(int val) : val_(val), left_(NULL), right_(NULL) {}

    int val_;       // payload
    Node *left_;    // left child, NULL when absent
    Node *right_;   // right child, NULL when absent
};
// Returns the height (node count of the deepest chain) of the subtree at n,
// 0 for NULL. Side effect: records every node whose through-path
// (left height + 1 + right height) ties or beats the best length seen so
// far; strictly better lengths discard previous candidates.
int GetHeight(Node *n, int &longest_paths_len,
vector<Node *> &longest_paths_top_nodes)
{
    if (n == NULL) {
        return 0;
    }
    int lh = GetHeight(n->left_, longest_paths_len, longest_paths_top_nodes);
    int rh = GetHeight(n->right_, longest_paths_len, longest_paths_top_nodes);
    int through_len = lh + rh + 1;  // nodes on the longest path peaking at n
    if (through_len >= longest_paths_len) {
        if (through_len > longest_paths_len) {
            // Strictly longer: older top-node candidates are obsolete.
            longest_paths_top_nodes.clear();
        }
        longest_paths_len = through_len;
        longest_paths_top_nodes.push_back(n);
    }
    return 1 + max(lh, rh);
}
// Depth-first walk collecting, into longest_paths, every downward path
// (starting at the subtree root this was first called with) that has the
// maximum node count seen so far; path is the current root-to-n chain.
void FindLongestPath(Node *n, vector<Node *> &path, int &longest_paths_len,
vector<vector<Node *>> &longest_paths)
{
    if (n == NULL) {
        return;
    }
    path.push_back(n);
    int len = (int)path.size();
    if (len >= longest_paths_len) {
        if (len > longest_paths_len) {
            longest_paths.clear();  // strictly longer: drop shorter paths
        }
        longest_paths.push_back(path);
        longest_paths_len = len;
    }
    FindLongestPath(n->left_, path, longest_paths_len, longest_paths);
    FindLongestPath(n->right_, path, longest_paths_len, longest_paths);
    path.pop_back();  // restore path for the caller
}
// Returns every longest (by node count) path in the tree, each listed from
// one end, through its top node, to the other end.
vector<vector<Node *>> FindLongestPaths(Node *root)
{
// Pass 1: find the maximum path length and the node(s) where such paths
// peak (the "top" nodes).
vector<Node *> longest_paths_top_nodes;
int longest_paths_len = 0;
GetHeight(root, longest_paths_len, longest_paths_top_nodes);
vector<vector<Node *>> longest_paths;
for (Node *top_node : longest_paths_top_nodes) {
// Pass 2: per top node, collect the deepest downward paths of the left
// and right subtrees independently. This inner counter intentionally
// shadows the outer one; it is reset per subtree.
int longest_paths_len = 0;
vector<Node *> path;
vector<vector<Node *>> left_longest_paths;
FindLongestPath(top_node->left_, path, longest_paths_len, left_longest_paths);
longest_paths_len = 0;
path.clear();
vector<vector<Node *>> right_longest_paths;
FindLongestPath(top_node->right_, path, longest_paths_len, right_longest_paths);
// A missing subtree contributes one empty half so the cross product
// below still emits paths through this top node.
if (left_longest_paths.empty()) {
left_longest_paths.push_back(vector<Node *>());
}
if (right_longest_paths.empty()) {
right_longest_paths.push_back(vector<Node *>());
}
// Combine every left half with every right half:
// reversed left half + top node + right half.
for (auto const &left_path : left_longest_paths) {
for (auto const &right_path : right_longest_paths) {
path.clear();
path.insert(path.end(), left_path.begin(), left_path.end());
reverse(path.begin(), path.end());
path.push_back(top_node);
path.insert(path.end(), right_path.begin(), right_path.end());
longest_paths.push_back(path);
}
}
}
return longest_paths;
}
// Demo driver. Builds this tree and prints all longest paths:
//        1
//       / \
//      2   3
//     / \
//    4   5
int main()
{
    Node n1(1);
    Node n2(2);
    Node n3(3);
    Node n4(4);
    Node n5(5);
    n1.left_ = &n2;
    n1.right_ = &n3;
    // Fixed: the original also assigned n2.left_ = &n3 here — a dead store
    // immediately overwritten by the next line (and n3 is already n1's
    // right child, so that link would have corrupted the tree shape).
    n2.left_ = &n4;
    n2.right_ = &n5;
    vector<vector<Node *>> paths = FindLongestPaths(&n1);
    for (auto const &path : paths) {
        for (Node *n : path) {
            cout << "->" << n->val_;
        }
        cout << "\n";
    }
    return 0;
}
We can pre-process the file, building word => positions_in_file hash map. Time complexity of this step: O(N) (where N is number of words in the file, and we assume there is a constant limit on length of any word).
At the query time (the code below), we combine positions of the needed words, sort them, and find the smallest window using those positions. The worst case time complexity: O(N log N).
#include <iostream>
#include <vector>
#include <unordered_map>
#include <unordered_set>
using namespace std;
// Finds the smallest window [first, last] of word positions (inclusive)
// that contains every word in words_to_find, honoring duplicates.
// word_positions maps each word to the positions where it occurs in the
// file. Returns (-1, -1) when some requested word is missing entirely or
// there are fewer occurrences than requested words.
pair<int, int> MinWindow(unordered_map<string, vector<int>> const &word_positions,
vector<string> const &words_to_find)
{
    // Each distinct word's position list is merged exactly once.
    unordered_set<string> unique_words_to_find(words_to_find.begin(),
                                               words_to_find.end());
    vector<int> positions;                    // all positions of needed words
    unordered_map<int, string> word_at_position;
    for (auto const &word : unique_words_to_find) {
        auto it = word_positions.find(word);
        if (it == word_positions.end()) {
            return pair<int, int>(-1, -1);    // word absent from the file
        }
        // Fixed: this local used to be named word_positions, shadowing the
        // function parameter of the same name.
        vector<int> const &positions_of_word = it->second;
        positions.insert(positions.end(), positions_of_word.begin(),
                         positions_of_word.end());
        for (int pos : positions_of_word) {
            word_at_position[pos] = word;
        }
    }
    if (positions.size() < words_to_find.size()) {
        return pair<int, int>(-1, -1);
    }
    sort(positions.begin(), positions.end());
    // Required multiset of words vs. words inside the current window.
    // (Renamed from `hash`, which collided with std::hash under
    // `using namespace std;`.)
    unordered_map<string, int> pattern_counts, window_counts;
    for (string const &word : words_to_find) {
        ++pattern_counts[word];
    }
    pair<int, int> min_window = pair<int, int>(-1, -1);
    size_t start = 0;                 // left edge (index into positions)
    size_t words_matched_count = 0;   // needed occurrences currently covered
    for (size_t i = 0; i < positions.size(); ++i) {
        int pos = positions[i];
        string const &word = word_at_position[pos];
        ++window_counts[word];
        if (window_counts[word] <= pattern_counts[word]) {
            ++words_matched_count;    // this occurrence was still needed
        }
        if (words_matched_count == words_to_find.size()) {
            // Shrink from the left while the leftmost word is surplus.
            while (true) {
                string const &word_at_start = word_at_position[positions[start]];
                if (window_counts[word_at_start] > pattern_counts[word_at_start]) {
                    ++start;
                    --window_counts[word_at_start];
                } else {
                    break;
                }
            }
            int window_size = positions[i] - positions[start] + 1;
            if (min_window.first == -1 ||
                window_size < min_window.second - min_window.first + 1)
            {
                min_window = pair<int, int>(positions[start], positions[i]);
            }
        }
    }
    return min_window;
}
// Demo: three words with known positions; prints the smallest window that
// covers all of them.
int main()
{
    unordered_map<string, vector<int>> word_positions{
        {"cat", {1, 8, 45}},
        {"rat", {5, 27}},
        {"bat", {64, 128}},
    };
    auto window = MinWindow(word_positions, {"cat", "rat", "bat"});
    cout << window.first << "->" << window.second << "\n";
    return 0;
}
// Integer sequence with O(1) bulk AddToAll / MultiplyAll: elements are
// stored normalized and the accumulated transformation is applied lazily
// in Get.
// Invariant: true value = stored * (factor_ / base_factor_[i]) + addition_,
// where base_factor_[i] is the global factor at the element's append time.
// NOTE(review): factor_ / base_factor_[i] divides exactly because factor_
// only grows by multiplication, but int overflow of factor_ / addition_ is
// not guarded — confirm expected value ranges.
class Ints {
public:
Ints()
{
addition_ = 0;
factor_ = 1;
}
// Appends x, pre-subtracting the pending addition so Get reproduces x
// (factor_ / base_factor_ is 1 at append time).
void Append(int x)
{
vals_.push_back(x - addition_);
base_factor_.push_back(factor_);
}
// Adds x to every element, O(1).
void AddToAll(int x)
{
addition_ += x;
}
// Multiplies every element by x, O(1). The pending addition scales too,
// since (v + a) * x == v * x + a * x.
void MultiplyAll(int x)
{
factor_ *= x;
addition_ *= x;
}
// Returns element idx with all pending operations applied, or 0 when
// idx is out of range (a sentinel indistinguishable from a real 0).
int Get(int idx) const
{
return idx < vals_.size() && idx >= 0
? vals_[idx] * (factor_ / base_factor_[idx]) + addition_
: 0;
}
private:
vector<int> vals_;         // normalized values
vector<int> base_factor_;  // factor_ snapshot at append time, per element
int addition_, factor_;    // pending global add / multiply
};
I assume that after inserts the tree should stay complete.
First we find the parent for the new node using a recursive in-order traversal, looking for a node which has a child missing, and which has minimum depth. One optimization applied is that when the minimum depth gets better we can stop searching.
Time: O(N).
Space: O(log N).
// Minimal binary-tree node used by the complete-tree insertion below.
class Node {
public:
    Node(int val) : val_(val), left_(NULL), right_(NULL) {}

    int val_;
    Node *left_, *right_;
};
// In-order search for the attachment parent of the next insertion: a node
// missing at least one child, at minimum depth. Results are reported via
// parent / parent_min_depth; stop_searching short-circuits the remaining
// traversal once set.
// NOTE(review): stop_searching is set the first time the minimum improves
// over an earlier candidate, on the assumption (complete tree, in-order
// visit) that no shallower candidate can appear later — confirm this holds
// for the intended inputs.
void FindParent(Node *n, Node* &parent, int &parent_min_depth, bool &stop_searching, int depth = 0)
{
if (n &&
!stop_searching)
{
FindParent(n->left_, parent, parent_min_depth, stop_searching, depth + 1);
// Any node with a free child slot can accept the new node.
if (!n->left_ ||
!n->right_)
{
if (depth < parent_min_depth) {
// Improving on a previously found candidate: take it and stop.
if (parent_min_depth != numeric_limits<int>::max()) {
stop_searching = true;
}
parent_min_depth = depth;
parent = n;
}
}
FindParent(n->right_, parent, parent_min_depth, stop_searching, depth + 1);
}
}
// Inserts val into the complete tree rooted at root, attaching the new
// node under the shallowest parent with a free child slot.
// Fixed: `parent` was left uninitialized; with an empty tree
// (root == NULL) FindParent never assigns it and the original then
// dereferenced an indeterminate pointer — undefined behavior.
void Insert(Node *root, int val)
{
    Node *parent = NULL;
    int parent_min_depth = numeric_limits<int>::max();
    bool stop_searching = false;
    FindParent(root, parent, parent_min_depth, stop_searching);
    if (parent == NULL) {
        return;  // empty tree: nothing to attach to via this interface
    }
    Node *n = new Node(val);
    if (!parent->left_) {
        parent->left_ = n;
    } else {
        parent->right_ = n;
    }
}
I assume the input is not really nodes of the linked list, but some abstract pointers. A pointer only tells us that there is a connection (either using next or prev of a linked-list node) between two nodes.
In this case terminal nodes will be mentioned exactly 2 times in the pointers. Interior nodes will be mentioned 4 times. Each independent block contains 2 terminal nodes (head and tail).
// One raw link between two list nodes, identified by integer ids.
class Pointer {
public:
    int from_node_;
    int to_node_;
};

// Counts the independent doubly-linked blocks described by the pointers.
// In a doubly-linked block every interior node is referenced four times
// (next and prev, in both directions) and every terminal node exactly
// twice; each block has two terminals (head and tail), so the block count
// is terminals / 2. Aborts via exit(-1) on inconsistent input.
int BlocksCount(std::vector<Pointer> const &pointers)
{
    std::unordered_map<int, int> node_counts;
    for (Pointer const &p : pointers) {
        ++node_counts[p.from_node_];
        ++node_counts[p.to_node_];
    }
    int terminal_nodes_count = 0;
    for (auto const &entry : node_counts) {
        if (entry.second == 2) {
            ++terminal_nodes_count;          // a head or a tail
        } else if (entry.second != 4) {
            std::cerr << "invalid data\n";   // neither terminal nor interior
            exit(-1);
        }
    }
    if (terminal_nodes_count % 2 != 0) {
        std::cerr << "invalid data\n";       // terminals must pair up
        exit(-1);
    }
    return terminal_nodes_count / 2;
}
// Prints every abbreviation of s obtained by replacing non-adjacent,
// non-empty substrings with their lengths (e.g. "word" -> "w2d", "3d",
// "4", ...). The unabbreviated string itself is never printed, and two
// replacement counts are never adjacent (that would be ambiguous).
void PrintAbbreviations(std::string const &s, std::string const abbr = "", int idx = 0)
{
    if (idx >= (int)s.size()) {
        // Emit genuine abbreviations only: non-empty and different from s.
        if (!abbr.empty() && abbr != s) {
            std::cout << abbr << "\n";
        }
        return;
    }
    // Option 1: keep the current character verbatim.
    PrintAbbreviations(s, abbr + s[idx], idx + 1);
    // Option 2: abbreviate a run starting here — allowed only when the
    // previous token is not already a number.
    bool prev_digit = !abbr.empty() && isdigit(abbr.back());
    if (!prev_digit) {
        for (int i = idx; i < (int)s.size(); ++i) {
            PrintAbbreviations(s, abbr + std::to_string(i - idx + 1), i + 1);
        }
    }
}
// Collects the leaf values of the tree implicitly encoded in a[start, end):
// a[start] acts as a parent; the remainder is split at the first element
// >= a[start] (or at the final position) and both halves recurse. Call with
// the defaults to process the whole array (leaves is cleared first).
void GetLeaves(std::vector<int> const &a, std::vector<int> &leaves,
int start = -1, int end = -1)
{
    if (start == -1) {
        // Top-level call: reset output and cover the whole array.
        start = 0;
        end = (int)a.size();
        leaves.clear();
    }
    bool bad_range = start < 0 || end < 0 || end > (int)a.size() || start >= end;
    if (bad_range) {
        return;
    }
    if (end - start == 1) {
        leaves.push_back(a[start]);  // a single element is a leaf
        return;
    }
    for (int i = start + 1; i < end; ++i) {
        bool split_here = a[i] >= a[start] || i + 1 == end;
        if (split_here) {
            GetLeaves(a, leaves, start + 1, i);
            GetLeaves(a, leaves, i, end);
            break;
        }
    }
}
We can sort the meetings by start time, and then, compare situations of taking the particular meeting and skipping it:
// A weighted meeting interval, ordered by start time with end time as the
// tie-breaker.
class Meeting {
public:
    int start_;
    int end_;
    int weight_;

    Meeting(int start, int end, int weight)
        : start_(start), end_(end), weight_(weight) {}

    // Lexicographic order on (start_, end_); weight_ is ignored.
    bool operator < (Meeting const &other) const
    {
        if (start_ != other.start_) {
            return start_ < other.start_;
        }
        return end_ < other.end_;
    }
};
// Outcome of scheduling: the chosen meetings and their total weight.
// valid_ doubles as the "computed" flag for memoization slots.
class Result {
public:
    int total_weight_;
    vector<Meeting> meetings_;
    bool valid_;

    Result(bool valid) : total_weight_(0), valid_(valid) {}
};
// Recursive core of weighted interval scheduling over meetings sorted by
// start time: returns the maximum-weight set of pairwise non-overlapping
// meetings among meetings[idx..]. memo caches one Result per start index;
// chosen meetings accumulate in Result::meetings_ in reverse order.
// NOTE(review): the overlap test treats equal boundaries
// (next.start_ == cur.end_) as a conflict — confirm touching meetings must
// not both be taken.
Result OptimalSchedule(vector<Meeting> const &meetings, vector<Result> &memo, int idx = 0)
{
if (idx >= meetings.size() ||
idx < 0)
{
// Past the end: an empty, valid schedule of weight 0.
return Result(true);
}
if (idx == 0) {
// Top-level call initializes the memo table.
memo.clear();
memo.resize(meetings.size(), Result(false));
}
if (memo[idx].valid_) {
return memo[idx];
}
// First later meeting compatible with meetings[idx].
int next_idx = idx + 1;
while (next_idx < meetings.size() &&
meetings[next_idx].start_ <= meetings[idx].end_)
{
++next_idx;
}
// Option 1: take meetings[idx] plus the best schedule after it.
Result take = OptimalSchedule(meetings, memo, next_idx);
take.total_weight_ += meetings[idx].weight_;
take.meetings_.push_back(meetings[idx]);
// Option 2: skip it and start from the next meeting.
Result skip = OptimalSchedule(meetings, memo, idx + 1);
memo[idx] = take.total_weight_ > skip.total_weight_ ? take : skip;
return memo[idx];
}
// Entry point: sorts the meetings by (start, end), then delegates to the
// memoized recursion to pick a maximum-weight non-overlapping subset.
Result OptimalSchedule(vector<Meeting> &meetings)
{
    sort(meetings.begin(), meetings.end());
    vector<Result> memo;  // filled lazily by the recursive overload
    return OptimalSchedule(meetings, memo);
}
The greedy algorithm (sorting tasks in descending order and filling the least loaded worker so far) doesn't guarantee the optimal solution. E.g. 2 workers and these tasks: 9,8,7,5,3.
Looks like it's needed to iterate through all combinations of assignment a task to a worker:
// Advances task_worker to the next assignment, treating it as a
// little-endian counter in base workers_count. Returns false once the
// counter wraps back to all zeros (every combination has been produced).
bool BalanceSetNewComb(std::vector<int> &task_worker, int workers_count)
{
    size_t pos = 0;
    while (pos < task_worker.size()) {
        if (++task_worker[pos] < workers_count) {
            return true;           // no carry: a fresh combination
        }
        task_worker[pos] = 0;      // carry into the next digit
        ++pos;
    }
    return false;
}
// Returns the makespan (busiest worker's total time) of one assignment,
// where task_worker[i] names the worker performing tasks[i].
int BalanceGetCombTime(std::vector<int> const &tasks, std::vector<int> const &task_worker, int workers_count)
{
    std::vector<int> worker_time(workers_count, 0);
    int max_time = 0;
    for (size_t i = 0; i < tasks.size(); ++i) {
        int &t = worker_time[task_worker[i]];
        t += tasks[i];
        max_time = std::max(max_time, t);
    }
    return max_time;
}
// Exhaustively enumerates every task -> worker assignment and returns the
// minimum achievable makespan. Cost is workers_count ^ tasks.size(), so
// this is only suitable for small inputs.
int Balance(std::vector<int> const &tasks, int workers_count)
{
    std::vector<int> task_worker(tasks.size(), 0);
    int min_time = std::numeric_limits<int>::max();
    do {
        int combination_time = BalanceGetCombTime(tasks, task_worker, workers_count);
        min_time = std::min(min_time, combination_time);
    } while (BalanceSetNewComb(task_worker, workers_count));
    return min_time;
}
// Returns how many leading elements sum to exactly half of the array's
// total (so the array splits into two non-empty equal-sum parts), or -1
// when no such split exists.
// Fixed: the original loop bailed out as soon as the running prefix sum
// exceeded half the total, which wrongly rejected arrays containing
// negative elements — e.g. {3, 1, -2, 2} splits after 3 elements because
// 3 + 1 - 2 == 2 == 2. Now every split point is examined.
int Split(std::vector<int> const &a)
{
    int total = 0;
    for (int n : a) {
        total += n;
    }
    if (total > 0 && total % 2 == 0) {
        int const half_sum = total / 2;
        int prefix = 0;
        // i + 1 < size keeps the right-hand part non-empty.
        for (size_t i = 0; i + 1 < a.size(); ++i) {
            prefix += a[i];
            if (prefix == half_sum) {
                return (int)i + 1;
            }
        }
    }
    return -1;
}
// Returns s with the minimum number of parentheses inserted to balance it:
// a '(' is inserted immediately before each unmatched ')', and the missing
// ')' are appended at the end. Other characters pass through unchanged.
std::string Balance(std::string const &s)
{
    std::string out;
    int open_count = 0;  // currently unmatched '(' in out
    for (char c : s) {
        switch (c) {
        case '(':
            ++open_count;
            break;
        case ')':
            if (--open_count < 0) {
                out += '(';      // pair up this otherwise-unmatched ')'
                open_count = 0;
            }
            break;
        default:
            break;
        }
        out += c;
    }
    // Close every opener that never found a partner.
    out.append(open_count, ')');
    return out;
}
// Greedy feasibility check: per pass over the servers, each server may
// contribute at most one unit of its remaining capacity to the current
// task. Both vectors are taken by value because they are sorted and
// consumed in place. Returns true when every task can be completed.
bool CanRunTasks(std::vector<int> servers, std::vector<int> tasks)
{
    std::sort(servers.begin(), servers.end(), std::greater<int>());
    std::sort(tasks.begin(), tasks.end(), std::greater<int>());
    for (size_t i = 0; i < tasks.size(); ++i) {
        int &need = tasks[i];
        for (size_t j = 0; j < servers.size() && need > 0; ++j) {
            if (servers[j] > 0) {
                --servers[j];  // one capacity unit from this server
                --need;
            }
        }
        if (need > 0) {
            return false;      // not enough servers with capacity left
        }
    }
    return true;
}
// Maximum sum over all subarrays containing at least k elements.
// Sliding window [start, i]: the newest k elements always stay; the older
// "tail" (elements inside the window longer than k steps) is dropped as a
// whole whenever its running sum becomes non-positive, since keeping it
// could only lower the total.
// Returns INT_MIN when k is out of range (k <= 0 or k > a.size()).
int MaxSubarray(vector<int> const &a, int k)
{
int max_sum = numeric_limits<int>::min();
if (k > 0 &&
k <= a.size())
{
int start = 0;
int sum = 0;
int tail_sum = 0;
for (int i = 0; i < a.size(); ++i) {
sum += a[i];
if (i >= k) {
// a[i - k] just aged out of the mandatory last-k region.
tail_sum += a[i - k];
// A non-positive tail never helps: shrink the window down to
// exactly the last k elements.
if (tail_sum <= 0) {
sum -= tail_sum;
tail_sum = 0;
start = i - k + 1;
}
}
// Only windows holding at least k elements are candidates.
if (i - start + 1 >= k) {
max_sum = max(max_sum, sum);
}
}
}
return max_sum;
}
// Serializes the tree rooted at n into list in pre-order, writing INT_MIN
// as the NULL sentinel (so the value INT_MIN itself cannot be stored).
void Store(Node const *n, vector<int> &list)
{
    if (n != NULL) {
        list.push_back(n->val_);
        Store(n->left_, list);
        Store(n->right_, list);
    } else {
        list.push_back(numeric_limits<int>::min());
    }
}
// Rebuilds a tree from the pre-order stream written by Store, advancing
// idx past everything consumed. Nodes are heap-allocated; caller owns them.
Node *Restore(vector<int> const &list, int &idx)
{
    int val = list[idx++];
    if (val == numeric_limits<int>::min()) {
        return NULL;  // sentinel: absent subtree
    }
    Node *n = new Node(val);
    n->left_ = Restore(list, idx);
    n->right_ = Restore(list, idx);
    return n;
}
// Returns every case variant of s: each alphabetic character appears both
// in its original and its flipped case; other characters are kept as-is.
// An empty s yields an empty result (not {""}).
std::vector<std::string> Combs(std::string const &s, int idx = 0)
{
    std::vector<std::string> combs;
    if (idx == (int)s.size()) {
        if (idx != 0) {
            combs.push_back("");  // one empty suffix to extend
        }
        return combs;
    }
    if (idx < 0 || idx > (int)s.size()) {
        return combs;             // defensive: index out of range
    }
    std::vector<std::string> suffixes = Combs(s, idx + 1);
    std::string variants(1, s[idx]);
    if (isalpha(s[idx])) {
        variants += (char)(isupper(s[idx]) ? tolower(s[idx]) : toupper(s[idx]));
    }
    for (char v : variants) {
        for (auto const &suffix : suffixes) {
            combs.push_back(v + suffix);
        }
    }
    return combs;
}
My idea is to generate permutations (with duplicated characters allowed), but while the process to check if the particular character is forbidden to take the particular place or not.
// Returns all permutations of s in which no character lands on a position
// listed in forbidden_positions. Adjacent equal characters are skipped so
// each distinct permutation appears once (this requires s to be sorted).
std::vector<std::string> Dearrangements(std::string s,
std::set<std::pair<char, int>> const &forbidden_positions, int pos = 0)
{
    std::vector<std::string> dearrangements;
    if (s.empty()) {
        if (pos != 0) {
            dearrangements.push_back("");  // one complete arrangement
        }
        return dearrangements;
    }
    for (size_t i = 0; i < s.size(); ++i) {
        if (i > 0 && s[i] == s[i - 1]) {
            continue;  // duplicate: already tried at this position
        }
        if (forbidden_positions.count(std::pair<char, int>(s[i], pos)) > 0) {
            continue;  // s[i] must not occupy index pos
        }
        std::string rest = s;
        rest.erase(rest.begin() + i);
        for (auto const &tail : Dearrangements(rest, forbidden_positions, pos + 1)) {
            dearrangements.push_back(s[i] + tail);
        }
    }
    return dearrangements;
}
// Convenience wrapper: forbids each character from staying at its original
// index in s, then enumerates what remains — derangements with respect to
// character values.
vector<string> Dearrangements(string s)
{
    set<pair<char, int>> forbidden_positions;
    int idx = 0;
    for (char c : s) {
        forbidden_positions.insert(pair<char, int>(c, idx++));
    }
    sort(s.begin(), s.end());  // the recursive overload requires sorted input
    return Dearrangements(s, forbidden_positions);
}
1. O(k * n). We can go k times through the array. For the i-th closest point, we find the closest point out of points i through n. The closest point gets swapped with the i-th point.
2. O(log k * n). We can use a heap (the code below).
3. As a preprocessing step, we can insert all of the points into a structure like an R-Tree. Finding the k closest points would then take expected O(log n).
// Returns the k points of a closest to origin, using a size-k heap
// (O(n log k)). Returns an empty vector when k is out of range.
// NOTE(review): assumes PQEntry(point_idx, distance) makes the heap a
// max-heap by distance_ (farthest kept point on top) — confirm against
// PQEntry's comparison operator.
vector<Point> Closest(vector<Point> const &a, Point const &origin, int k)
{
vector<Point> closest;
if (a.size() >= k &&
k > 0)
{
priority_queue<PQEntry> pq;
for (int i = 0; i < a.size(); ++i) {
// Squared Euclidean distance: monotone in true distance, so no
// sqrt is needed for comparisons.
double dx = origin.x_ - a[i].x_;
double dy = origin.y_ - a[i].y_;
double distance = dx * dx + dy * dy;
bool insert = false;
if (i < k) {
// Heap not yet full: keep unconditionally.
insert = true;
} else if (distance < pq.top().distance_) {
// Closer than the farthest kept point: replace it.
pq.pop();
insert = true;
}
if (insert) {
pq.push(PQEntry(i, distance));
}
}
// Drain the heap into the result (farthest-first order).
while (!pq.empty()) {
closest.push_back(a[pq.top().point_idx_]);
pq.pop();
}
}
return closest;
}
// Phone-keypad expansion: substitutes every letter mapped to each digit of
// digits, appending complete combinations to combs. A digit missing from
// map prunes its entire branch (and an empty digits yields nothing).
void Combs(std::unordered_map<char, std::vector<char>> &map, std::string const &digits,
std::vector<std::string> &combs, std::string const comb = "")
{
    if (comb.size() == digits.size()) {
        if (!comb.empty()) {
            combs.push_back(comb);
        }
        return;
    }
    char digit = digits[comb.size()];  // next digit to expand
    auto it = map.find(digit);
    if (it == map.end()) {
        return;  // unmapped digit: nothing down this branch
    }
    for (char letter : it->second) {
        Combs(map, digits, combs, comb + letter);
    }
}
// Kadane-style scan returning the maximum sum over non-empty subarrays of
// length at most k. Returns INT_MIN for k <= 0 or an empty array.
int MaxSubarray(std::vector<int> const &a, int k)
{
    int best = std::numeric_limits<int>::min();
    if (k <= 0) {
        return best;
    }
    int start = 0;  // left edge of the current window
    int sum = 0;    // sum of a[start..i]
    for (int i = 0; i < (int)a.size(); ++i) {
        // Shrink while the window would exceed k elements, and shed
        // non-positive leading elements that cannot improve the sum.
        while (i - start + 1 > k || (start < i && a[start] <= 0)) {
            sum -= a[start];
            ++start;
        }
        sum += a[i];
        best = std::max(best, sum);
        if (sum <= 0) {
            // A non-positive running sum never helps: restart after i.
            sum = 0;
            start = i + 1;
        }
    }
    return best;
}
// Inserts val into a circular singly-linked list and returns the head.
// Trick: the node is always spliced right after head, then the new VALUE
// (not the node) is bubbled forward by swapping payloads, so no
// predecessor pointer is needed and head's identity stays stable.
// NOTE(review): assumes the list is kept sorted ascending starting at
// head — confirm with the callers.
Node *Insert(Node *head, int val)
{
Node *n = new Node(val);
if (!head) {
// Empty list: a single self-linked node.
n->next_ = n;
head = n;
} else {
// Splice after head, then swap payloads so the new value sits at head.
n->next_ = head->next_;
head->next_ = n;
swap(n->val_, head->val_);
}
// Bubble the out-of-place value forward until order is restored or we
// wrap back around to head.
for (n = head; n->next_ != head && n->val_ > n->next_->val_; n = n->next_) {
swap(n->val_, n->next_->val_);
}
return head;
}
Assuming the graph is acyclic, we can find all paths from the source company to the target company. For each path we find a share, going through the path backward and applying the share percentage. Then, we sum the shares of the paths.
#include <iostream>
#include <vector>
#include <unordered_map>
using namespace std;
class Node;
// A directed ownership edge: the owner holds share_ percent of
// adjacent_node_.
class Link {
public:
    Node *adjacent_node_;
    double share_;

    Link(Node *adjacent_node, double share)
        : adjacent_node_(adjacent_node), share_(share) {}
};
// A company in the ownership graph, identified by one character.
class Node {
public:
    Node(char name) : name_(name) {}

    // Records that this company owns `share` percent of adjacent_node.
    void AddLink(Node *adjacent_node, double share)
    {
        links_.push_back(Link(adjacent_node, share));
    }

    char name_;
    vector<Link> links_;  // outgoing ownership edges
};
class Graph {
public:
~Graph()
{
for (auto n : nodes_) {
delete n;
}
}
void AddLink(char name1, char name2, double share)
{
Node *n1 = GetNode(name1);
Node *n2 = GetNode(name2);
n1->AddLink(n2, share);
}
Node *GetNode(char name)
{
auto it = name_to_node_.find(name);
if (it != name_to_node_.end()) {
return it->second;
}
Node *n = new Node(name);
nodes_.push_back(n);
name_to_node_[name] = n;
return n;
}
vector<Node *> nodes_;
private:
unordered_map<char, Node *> name_to_node_;
};
// Enumerates all simple paths from source to target with an explicit DFS
// stack. Each entry holds the link used to enter a node plus the offset of
// the next outgoing link to try at that node.
// NOTE(review): assumes the graph is acyclic (see the note above); a cycle
// would make this loop forever.
vector<vector<Link>> AllPaths(Graph &g, Node *source, Node *target)
{
class StackEntry {
public:
StackEntry(Link link, int offset) : link_(link), offset_(offset) {};
Link link_;
int offset_;
};
vector<vector<Link>> paths;
vector<StackEntry> st;
// Sentinel link into the source; its 0% share never enters a product
// because path reconstruction below stops before stack index 0.
Link start_link(source, 0);
st.push_back(StackEntry(start_link, 0));
while (!st.empty()) {
StackEntry e = st.back();
st.pop_back();
Node *n = e.link_.adjacent_node_;
if (n) {
if (n == target) {
// Found a path: the current link plus the links still on the
// stack (one per ancestor, deepest first), minus the sentinel.
vector<Link> path{e.link_};
for (int i = st.size() - 1; i > 0; --i) {
path.push_back(st[i].link_);
}
paths.push_back(path);
} else if (e.offset_ < n->links_.size()) {
// Re-push this node pointing at its next sibling link, then
// descend into the current outgoing link.
st.push_back(StackEntry(e.link_, e.offset_ + 1));
st.push_back(StackEntry(n->links_[e.offset_], 0));
}
}
}
return paths;
}
// Demo: A owns 10% of B and 2% of C; B owns 5% of C.
// A's effective share of C = direct 2% + indirect 10% * 5% = 2.5%.
int main()
{
    Graph g;
    g.AddLink('A', 'B', 10);
    g.AddLink('A', 'C', 2);
    g.AddLink('B', 'C', 5);
    double share = 0;
    for (auto const &path : AllPaths(g, g.GetNode('A'), g.GetNode('C'))) {
        // Multiply the percentages along the path, starting from 100%.
        double path_share = 100;
        for (auto const &link : path) {
            path_share *= link.share_ / 100.0;
        }
        share += path_share;
    }
    cout << share << "\n";
    return 0;
}
// Returns every subset of a (using elements at index idx or later) whose
// values sum to `sum`, memoized by (idx, remaining sum).
// Fixed: the memo was an unordered_map keyed by pair<int, int>, but the
// standard library provides no std::hash specialization for std::pair, so
// the original did not compile. std::map works via pair's operator<.
// NOTE(review): the sum < 0 pruning assumes non-negative elements —
// confirm with the callers.
std::vector<std::vector<int>> SumSets(std::vector<int> const &a, int sum,
std::map<std::pair<int, int>, std::vector<std::vector<int>>> &memo, int idx = 0)
{
    std::vector<std::vector<int>> sets;
    if (sum == 0) {
        sets.push_back(std::vector<int>());  // the empty set completes a sum
        return sets;
    }
    if (sum < 0) {
        return sets;  // overshot: no subset can help
    }
    std::pair<int, int> memo_key(idx, sum);
    auto it = memo.find(memo_key);
    if (it != memo.end()) {
        return it->second;
    }
    for (size_t i = idx; i < a.size(); ++i) {
        // Take a[i], then extend with every subset completing the rest.
        std::vector<std::vector<int>> subsets = SumSets(a, sum - a[i], memo, i + 1);
        for (auto const &subset : subsets) {
            std::vector<int> set{a[i]};
            set.insert(set.end(), subset.begin(), subset.end());
            sets.push_back(set);
        }
    }
    memo[memo_key] = sets;
    return memo[memo_key];
}
// Counts binary strings containing exactly `ones` 1s and `zeros` 0s with
// no two 1s adjacent. prev_one says whether the previous character placed
// was a 1.
// Fixed: the memo was an unordered_map keyed by tuple<int, int, bool>, but
// std::hash has no tuple specialization, so the original did not compile.
// std::map uses the tuple's operator< and works out of the box.
int Count(int ones, int zeros, std::map<std::tuple<int, int, bool>, int> &memo, bool prev_one = false)
{
    if (ones == 0 && zeros == 0) {
        return 1;  // one complete, valid string
    }
    if (ones < 0 || zeros < 0) {
        return 0;  // ran out of a character class
    }
    std::tuple<int, int, bool> memo_key = std::make_tuple(ones, zeros, prev_one);
    auto it = memo.find(memo_key);
    if (it != memo.end()) {
        return it->second;
    }
    // Place a 0 (always legal), or a 1 when the previous char was not a 1.
    int count = Count(ones, zeros - 1, memo, false);
    if (!prev_one) {
        count += Count(ones - 1, zeros, memo, true);
    }
    memo[memo_key] = count;
    return memo[memo_key];
}
// One trie node. Children are heap-allocated and reclaimed recursively by
// the destructor; terminal_ marks the end of an inserted word.
class TrieNode {
public:
    TrieNode() : terminal_(false) {}

    ~TrieNode()
    {
        for (auto const &entry : children_) {
            delete entry.second;
        }
    }

    // Returns the child for c, creating it on first use.
    TrieNode *InsertChild(char c)
    {
        TrieNode *n = GetChild(c);
        if (!n) {
            n = new TrieNode();
            children_[c] = n;
        }
        return n;
    }

    // Returns the child for c, or NULL when there is none.
    TrieNode *GetChild(char c)
    {
        auto it = children_.find(c);
        return it != children_.end() ? it->second : NULL;
    }

    bool terminal_;
private:
    std::unordered_map<char, TrieNode *> children_;
};
class Trie {
public:
void Insert(string const &s)
{
TrieNode *n = &root_;
for (char c : s) {
n = n->InsertChild(c);
}
n->terminal_ = true;
}
TrieNode *GetRoot()
{
return &root_;
}
private:
TrieNode root_;
};
// Returns true when s (from idx onward) can be partitioned entirely into
// words stored in trie. memo[i] is -1 while unknown, else a cached 0/1 for
// the suffix starting at i; the top-level call (idx == 0) resets it.
// An empty s yields false.
bool Composite(Trie &trie, vector<int> &memo, string const &s, int idx = 0)
{
    if (idx == 0) {
        memo.assign(s.size(), -1);
    }
    if (idx >= (int)s.size()) {
        return idx != 0;  // reached the end; empty input is not composite
    }
    if (memo[idx] != -1) {
        return memo[idx] != 0;
    }
    bool result = false;
    TrieNode *n = trie.GetRoot();
    for (int i = idx; i < (int)s.size(); ++i) {
        n = n->GetChild(s[i]);
        if (!n) {
            break;  // no stored word continues with this prefix
        }
        // A word ends here and the remainder is also composable: done.
        if (n->terminal_ && Composite(trie, memo, s, i + 1)) {
            result = true;
            break;
        }
    }
    memo[idx] = result;
    return memo[idx];
}
If the node has right child, the successor is the deepest left node, starting from the right child.
If the node doesn't have a right child, we can get the path from the root to the node, and find the successor (iterating from the end of the path) as the first ancestor whose left child is on the path.
// Binary-tree node used by the in-order successor routines below.
class Node {
public:
    Node(int val) : val_(val), left_(NULL), right_(NULL) {}

    int val_;
    Node *left_, *right_;
};
// Returns the path from `node` up to `root` (node first, root last), found
// by an iterative pre-order traversal, or an empty vector when node is not
// in the tree. The explicit stack records, per node, which child to try
// next (0 = left, 1 = right, 2 = done); when the target is found, the
// entries still on the stack are exactly its ancestor chain.
vector<Node *> PathToNode(Node *root, Node *node)
{
class StackEntry {
public:
Node *node_;
int child_idx_;
StackEntry(Node *node, int child_idx)
{
node_ = node;
child_idx_ = child_idx;
}
};
vector<Node *> path;
if (root &&
node)
{
stack<StackEntry> st;
st.push(StackEntry(root, 0));
while (!st.empty()) {
StackEntry e = st.top();
st.pop();
if (e.node_) {
if (e.node_ == node) {
// Target reached: drain the remaining ancestors into path.
path.push_back(e.node_);
while (!st.empty()) {
path.push_back(st.top().node_);
st.pop();
}
return path;
}
if (e.child_idx_ == 0) {
// Re-push as an ancestor marker, then descend left.
st.push(StackEntry(e.node_, e.child_idx_ + 1));
st.push(StackEntry(e.node_->left_, 0));
} else if (e.child_idx_ == 1) {
// Left subtree exhausted: descend right.
st.push(StackEntry(e.node_, e.child_idx_ + 1));
st.push(StackEntry(e.node_->right_, 0));
}
}
}
}
return path;
}
// Follows left links from node and returns the last node reached — node
// itself when it has no left child, NULL for NULL input.
Node *DeepestLeft(Node *node)
{
    Node *n = node;
    while (n != NULL && n->left_ != NULL) {
        n = n->left_;
    }
    return n;
}
// Returns the in-order successor of node in the tree rooted at root, or
// NULL when node is the maximum (or either input is NULL).
Node *GetInorderSuccessor(Node *root, Node *node)
{
    if (!root || !node) {
        return NULL;
    }
    if (node->right_) {
        // Successor is the leftmost node of the right subtree.
        return DeepestLeft(node->right_);
    }
    // Otherwise walk the node-to-root path and return the first ancestor
    // entered from its left child.
    vector<Node *> path = PathToNode(root, node);
    for (int i = 0; i + 1 < (int)path.size(); ++i) {
        if (path[i + 1]->left_ == path[i]) {
            return path[i + 1];
        }
    }
    return NULL;
}
// A graph vertex labeled with a character; neighbors are stored as indices
// into the owning graph's node array.
class Node {
public:
    Node(char val) : val_(val) {}

    void AddAdjacentNode(int n)
    {
        adjacent_nodes_.push_back(n);
    }

    char val_;
    std::vector<int> adjacent_nodes_;
};

// Undirected graph over character-labeled nodes, stored by value and
// addressable either by label or by index.
class Graph {
public:
    // Adds an undirected edge between the nodes labeled val1 and val2,
    // creating either node on first mention.
    void AddLink(char val1, char val2)
    {
        int n1 = AddNode(val1);
        int n2 = AddNode(val2);
        nodes_[n1].AddAdjacentNode(n2);
        nodes_[n2].AddAdjacentNode(n1);
    }
    // Looks a node up by label; NULL when absent.
    Node const *GetNode(char val) const
    {
        auto it = val_to_node_.find(val);
        return it == val_to_node_.end() ? NULL : &nodes_[it->second];
    }
    // Looks a node up by index; NULL when out of range.
    // Fixed: the original tested `idx > 0`, so the first node ever added
    // (index 0) could never be fetched by index — which also broke
    // adjacency walks that reference it.
    Node const *GetNode(int idx) const
    {
        return idx >= 0 && idx < (int)nodes_.size() ? &nodes_[idx] : NULL;
    }
private:
    // Returns the index for val, creating the node when it is new.
    int AddNode(char val)
    {
        auto it = val_to_node_.find(val);
        if (it != val_to_node_.end()) {
            return it->second;
        }
        nodes_.push_back(Node(val));
        int node_idx = (int)nodes_.size() - 1;
        return val_to_node_[val] = node_idx;
    }
    std::vector<Node> nodes_;
    std::unordered_map<char, int> val_to_node_;
};
// Returns true when word is non-empty and every consecutive character pair
// in word corresponds to an edge of g — i.e. word traces a path.
bool WordMatchesPath(Graph const &g, string const &word)
{
    if (word.empty()) {
        return false;
    }
    for (size_t i = 0; i < word.size(); ++i) {
        Node const *n = g.GetNode(word[i]);
        if (!n) {
            return false;  // character is not a vertex label
        }
        if (i + 1 == word.size()) {
            break;         // last character: nothing further to verify
        }
        bool link_found = false;
        for (int j : n->adjacent_nodes_) {
            Node const *adjacent = g.GetNode(j);
            if (adjacent->val_ == word[i + 1]) {
                link_found = true;
                break;
            }
        }
        if (!link_found) {
            return false;  // no edge to the next character
        }
    }
    return true;
}
// Trie node for the word-grouping code below. Children are owned and
// deleted by the destructor; terminal_ flags a complete stored word.
class TrieNode {
public:
    TrieNode() : terminal_(false) {}

    ~TrieNode()
    {
        for (auto &kv : children_) {
            delete kv.second;
        }
    }

    // Fetches, or lazily creates, the child edge labeled c.
    TrieNode *InsertChild(char c)
    {
        TrieNode *child = GetChild(c);
        if (child == NULL) {
            child = new TrieNode();
            children_[c] = child;
        }
        return child;
    }

    // NULL when no child edge is labeled c.
    TrieNode *GetChild(char c)
    {
        auto it = children_.find(c);
        if (it == children_.end()) {
            return NULL;
        }
        return it->second;
    }

    bool terminal_;
private:
    std::unordered_map<char, TrieNode *> children_;
};
class Trie {
public:
void Insert(string const &s)
{
TrieNode *n = &root_;
for (char c : s) {
n = n->InsertChild(c);
}
n->terminal_ = true;
}
TrieNode *GetRoot()
{
return &root_;
}
private:
TrieNode root_;
};
// Groups words by proper substrings that are themselves input words: each
// output row is [substring-word, containing-word, ...], one row per word
// that occurs strictly inside at least one other word.
// Implementation: build a trie over all words, then walk every substring
// of every word through it, recording matches shorter than the word.
// NOTE(review): if a dictionary word occurs at several offsets of one
// containing word, that word is appended once per occurrence — confirm
// whether duplicate entries are acceptable.
vector<vector<string>> Group(vector<string> const &words)
{
Trie trie;
for (auto const &w : words) {
trie.Insert(w);
}
// substring word -> indices of words containing it
unordered_map<string, vector<int>> groups;
for (int idx = 0; idx < words.size(); ++idx) {
string const &w = words[idx];
for (int i = 0; i < w.size(); ++i) {
// Walk the trie starting from each offset i of w.
TrieNode *n = trie.GetRoot();
for (int j = i; j < w.size() && n; ++j) {
n = n->GetChild(w[j]);
if (n &&
n->terminal_)
{
int size = j - i + 1;
// Proper substrings only: skip the word matching itself.
if (size < w.size()) {
groups[w.substr(i, size)].push_back(idx);
}
}
}
}
}
// Materialize the groups: the substring first, then its containers.
vector<vector<string>> out;
for (auto const &g : groups) {
if (!g.second.empty()) {
vector<string> v;
v.push_back(g.first);
for (int i : g.second) {
v.push_back(words[i]);
}
out.push_back(v);
}
}
return out;
}
RepSWE
RepHire high professional child development center in Charlotte. Pal-A-Roo’s Child Development Center is a family-owned child care facility offering ...
Rep
RepIf you want to attract someone then astrologer kumar can help you. Astrologer kumar is specialized in offering kamdev vashikaran ...
If the door has been toggled odd number of times, it'll be open.
Being toggled odd number of times, for Door N, means that N has odd number of factors (including factor 1).
The open door numbers happen to be perfect squares, but this pattern is not easy to derive without intensive whiteboarding.
- Alex August 07, 2017