sat: export from google3

Mizux committed Sep 27, 2024
1 parent 100b6bc commit 9a3c8a2
Showing 19 changed files with 1,135 additions and 61 deletions.
2 changes: 2 additions & 0 deletions ortools/graph/BUILD.bazel
@@ -144,8 +144,10 @@ cc_library(
"//ortools/base",
"//ortools/base:int_type",
"//ortools/base:strong_vector",
"//ortools/util:bitset",
"//ortools/util:time_limit",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/strings",
],
)
127 changes: 127 additions & 0 deletions ortools/graph/cliques.cc
@@ -20,6 +20,8 @@
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "ortools/util/bitset.h"

namespace operations_research {
namespace {
@@ -262,4 +262,129 @@ void CoverArcsByCliques(std::function<bool(int, int)> graph, int node_count,
initial_candidates.get(), 0, node_count, &actual, &stop);
}

void WeightedBronKerboschBitsetAlgorithm::Initialize(int num_nodes) {
work_ = 0;
weights_.assign(num_nodes, 0.0);

// We need +1 in case the graph is complete and forms a clique.
clique_.resize(num_nodes + 1);
clique_weight_.resize(num_nodes + 1);
left_to_process_.resize(num_nodes + 1);
x_.resize(num_nodes + 1);

// Initialize to an empty graph.
graph_.resize(num_nodes);
for (Bitset64<int>& bitset : graph_) {
bitset.ClearAndResize(num_nodes);
}
}

void WeightedBronKerboschBitsetAlgorithm::
TakeTransitiveClosureOfImplicationGraph() {
// We use the Floyd-Warshall algorithm.
const int num_nodes = weights_.size();
for (int k = 0; k < num_nodes; ++k) {
// Loop over all the implications i => k. We can do that by looking at
// not(k) => not(i).
for (const int i : graph_[k ^ 1]) {
// Now i also implies all the literals implied by k.
graph_[i].Union(graph_[k]);
}
}
}
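
As a side note, the small sketch below (not part of the commit; the variable-to-node mapping is an assumption chosen to match the unit test added in cliques_test.cc) illustrates the literal encoding used here: node i is the negation of node i ^ 1, an edge means the two literals cannot both hold, so AddEdge(i, j ^ 1) encodes the implication i => j.

// Sketch only: literal x_v is assumed to live at node 2 * v and not(x_v) at
// node 2 * v + 1. Encode x0 => x1 and x1 => x2, then take the closure.
WeightedBronKerboschBitsetAlgorithm algo;
algo.Initialize(/*num_nodes=*/6);
algo.AddEdge(0, 2 ^ 1);  // at-most-one(x0, not(x1)), i.e. x0 => x1.
algo.AddEdge(2, 4 ^ 1);  // at-most-one(x1, not(x2)), i.e. x1 => x2.
algo.TakeTransitiveClosureOfImplicationGraph();
// The closure now also contains the derived implication x0 => x2.
CHECK(algo.HasEdge(0, 4 ^ 1));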

std::vector<std::vector<int>> WeightedBronKerboschBitsetAlgorithm::Run() {
clique_index_and_weight_.clear();
std::vector<std::vector<int>> cliques;

const int num_nodes = weights_.size();
in_clique_.ClearAndResize(num_nodes);

queue_.clear();

int depth = 0;
left_to_process_[0].ClearAndResize(num_nodes);
x_[0].ClearAndResize(num_nodes);
for (int i = 0; i < num_nodes; ++i) {
left_to_process_[0].Set(i);
queue_.push_back(i);
}

// We run an iterative DFS where we push all possible next nodes to
// queue_. We just abort brutally if we hit the work limit.
while (!queue_.empty() && work_ <= work_limit_) {
const int node = queue_.back();
if (!in_clique_[node]) {
// We add this node to the clique.
in_clique_.Set(node);
clique_[depth] = node;
left_to_process_[depth].Clear(node);
x_[depth].Set(node);

// Note that it might seem we don't need to keep both sets since we
// only process nodes in order, but because of the pivot optimization,
// while both sets are sorted, they can be "interleaved".
++depth;
work_ += num_nodes;
const double current_weight = weights_[node] + clique_weight_[depth - 1];
clique_weight_[depth] = current_weight;
left_to_process_[depth].SetToIntersectionOf(left_to_process_[depth - 1],
graph_[node]);
x_[depth].SetToIntersectionOf(x_[depth - 1], graph_[node]);

// Choose a pivot. We use the vertex with the highest weight according to:
// Samuel Souza Brito, Haroldo Gambini Santos, "Preprocessing and
// Cutting Planes with Conflict Graphs",
// https://arxiv.org/pdf/1909.07780
// but maybe random is more robust?
int pivot = -1;
double pivot_weight = -1.0;
for (const int candidate : x_[depth]) {
const double candidate_weight = weights_[candidate];
if (candidate_weight > pivot_weight) {
pivot = candidate;
pivot_weight = candidate_weight;
}
}
double total_weight_left = 0.0;
for (const int candidate : left_to_process_[depth]) {
const double candidate_weight = weights_[candidate];
if (candidate_weight > pivot_weight) {
pivot = candidate;
pivot_weight = candidate_weight;
}
total_weight_left += candidate_weight;
}

// Heuristic: We can abort early if there is no way to reach the
// threshold here.
if (current_weight + total_weight_left < weight_threshold_) {
continue;
}

if (pivot == -1 && current_weight >= weight_threshold_) {
// This clique is maximal.
clique_index_and_weight_.push_back({cliques.size(), current_weight});
cliques.emplace_back(clique_.begin(), clique_.begin() + depth);
continue;
}

for (const int next : left_to_process_[depth]) {
if (graph_[pivot][next]) continue; // skip.
queue_.push_back(next);
}
} else {
// We finished exploring this node; backtrack.
--depth;
DCHECK_GE(depth, 0);
DCHECK_EQ(clique_[depth], node);
in_clique_.Clear(node);
queue_.pop_back();
}
}

return cliques;
}
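
For context, here is a minimal usage sketch of this class (not part of the commit, written against the public API declared in cliques.h; the tiny triangle graph, the weights and the thresholds are made-up values for illustration, and includes are omitted):

WeightedBronKerboschBitsetAlgorithm algo;
algo.Initialize(/*num_nodes=*/4);
for (int i = 0; i < 4; ++i) algo.SetWeight(i, 1.0);
algo.AddEdge(0, 1);
algo.AddEdge(1, 2);
algo.AddEdge(0, 2);          // Nodes {0, 1, 2} form a triangle; node 3 is isolated.
algo.SetMinimumWeight(2.5);  // Only report maximal cliques of weight >= 2.5.
algo.SetWorkLimit(1 << 20);
const std::vector<std::vector<int>> cliques = algo.Run();
// If the work limit is not hit, `cliques` contains a single clique
// {0, 1, 2} of weight 3.0; the maximal clique {3} has weight 1.0 < 2.5
// and is not reported.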

} // namespace operations_research
82 changes: 82 additions & 0 deletions ortools/graph/cliques.h
@@ -36,6 +36,7 @@
#include "ortools/base/int_type.h"
#include "ortools/base/logging.h"
#include "ortools/base/strong_vector.h"
#include "ortools/util/bitset.h"
#include "ortools/util/time_limit.h"

namespace operations_research {
@@ -358,6 +359,87 @@ class BronKerboschAlgorithm {
TimeLimit* time_limit_;
};

// A more specialized version used to separate clique cuts in a MIP solver.
// It finds all maximal cliques with a weight greater than a given threshold,
// and also has a computation limit.
//
// This implementation assumes a small graph since we use a dense bitmask
// representation to encode the graph adjacency. So it shouldn't really be used
// with more than a few thousand nodes.
class WeightedBronKerboschBitsetAlgorithm {
public:
// Resets the class to an empty graph with all weights set to zero.
// This also resets the work done.
void Initialize(int num_nodes);

// Sets the weight of a given node, whose index must be in [0, num_nodes).
// Weights are assumed to be non-negative.
void SetWeight(int i, double weight) { weights_[i] = weight; }

// Adds an edge to the graph.
void AddEdge(int a, int b) {
graph_[a].Set(b);
graph_[b].Set(a);
}

// We count the number of basic operations, and stop when we reach this limit.
void SetWorkLimit(int64_t limit) { work_limit_ = limit; }

// Set the minimum weight of the maximal cliques we are looking for.
void SetMinimumWeight(double min_weight) { weight_threshold_ = min_weight; }

// This function is quite specific. It interprets node i as the negated
// literal of node i ^ 1, and all j in graph[i] as literals that are in an
// at-most-one relation with i. So i implies not(j) for all j in graph[i].
//
// The transitive closure runs in O(num_nodes ^ 3) in the worst case, but since
// we process 64 bits at a time, it is okay to run it for graphs with up to 1k
// nodes.
void TakeTransitiveClosureOfImplicationGraph();

// Runs the algorithm and returns all maximal cliques with a weight above the
// threshold configured via SetMinimumWeight(). It is possible we reach the
// work limit before enumerating all of them.
std::vector<std::vector<int>> Run();

// Specific API where the indices refer to the last result of Run().
// This allows selecting cliques when there are many.
std::vector<std::pair<int, double>>& GetMutableIndexAndWeight() {
return clique_index_and_weight_;
}

int64_t WorkDone() const { return work_; }

bool HasEdge(int i, int j) const { return graph_[i][j]; }

private:
int64_t work_ = 0;
int64_t work_limit_ = std::numeric_limits<int64_t>::max();
double weight_threshold_ = 0.0;

std::vector<double> weights_;
std::vector<Bitset64<int>> graph_;

// Iterative DFS queue.
std::vector<int> queue_;

// Current clique we are constructing.
// Note this is always of size num_nodes; the clique is in [0, depth).
Bitset64<int> in_clique_;
std::vector<int> clique_;

// We maintain the weight of the clique. We use a stack to avoid floating-
// point issues from adding and subtracting weights many times. So
// clique_weight_[i] is the sum of the weights of the clique elements in
// [0, i).
std::vector<double> clique_weight_;

// Correspond to P and X in the Bron-Kerbosch description.
std::vector<Bitset64<int>> left_to_process_;
std::vector<Bitset64<int>> x_;

std::vector<std::pair<int, double>> clique_index_and_weight_;
};
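
A hedged sketch (not part of the commit) of how the (index, weight) pairs exposed by GetMutableIndexAndWeight() could be used to keep only the heaviest cliques when Run() returns many; `algo` is assumed to be an already-configured instance and `kMaxCliquesToKeep` is a hypothetical tuning parameter:

std::vector<std::vector<int>> cliques = algo.Run();
std::vector<std::pair<int, double>>& index_and_weight =
    algo.GetMutableIndexAndWeight();
// Sort heaviest first; `first` indexes into `cliques`, `second` is the clique
// weight recorded by Run().
std::sort(index_and_weight.begin(), index_and_weight.end(),
          [](const auto& a, const auto& b) { return a.second > b.second; });
const int num_to_keep =
    std::min(kMaxCliquesToKeep, static_cast<int>(index_and_weight.size()));
for (int i = 0; i < num_to_keep; ++i) {
  const std::vector<int>& clique = cliques[index_and_weight[i].first];
  // ... build a clique cut from `clique` ...
}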

template <typename NodeIndex>
void BronKerboschAlgorithm<NodeIndex>::InitializeState(State* state) {
DCHECK(state != nullptr);
115 changes: 115 additions & 0 deletions ortools/graph/cliques_test.cc
@@ -14,6 +14,7 @@
#include "ortools/graph/cliques.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
@@ -24,6 +25,7 @@

#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
@@ -412,6 +414,44 @@ TEST(BronKerbosch, CompleteGraphCover) {
EXPECT_EQ(10, all_cliques[0].size());
}

TEST(WeightedBronKerboschBitsetAlgorithmTest, CompleteGraph) {
const int num_nodes = 1000;
WeightedBronKerboschBitsetAlgorithm algo;
algo.Initialize(num_nodes);
for (int i = 0; i < num_nodes; ++i) {
for (int j = i + 1; j < num_nodes; ++j) {
algo.AddEdge(i, j);
}
}
std::vector<std::vector<int>> cliques = algo.Run();
EXPECT_EQ(cliques.size(), 1);
for (const std::vector<int>& clique : cliques) {
EXPECT_EQ(num_nodes, clique.size());
}
}

TEST(WeightedBronKerboschBitsetAlgorithmTest, ImplicationGraphClosure) {
const int num_nodes = 10;
WeightedBronKerboschBitsetAlgorithm algo;
algo.Initialize(num_nodes);
for (int i = 0; i + 2 < num_nodes; i += 2) {
const int j = i + 2;
algo.AddEdge(i, j ^ 1); // i => j
}
algo.TakeTransitiveClosureOfImplicationGraph();
for (int i = 0; i < num_nodes; ++i) {
for (int j = 0; j < num_nodes; ++j) {
if (i % 2 == 0 && j % 2 == 0) {
if (j > i) {
EXPECT_TRUE(algo.HasEdge(i, j ^ 1));
} else {
EXPECT_FALSE(algo.HasEdge(i, j ^ 1));
}
}
}
}
}

TEST(BronKerbosch, EmptyGraphCover) {
auto graph = EmptyGraph;
CliqueReporter<int> reporter;
@@ -477,6 +517,50 @@ TEST(BronKerboschAlgorithmTest, FullKPartiteGraph) {
}
}

TEST(WeightedBronKerboschBitsetAlgorithmTest, FullKPartiteGraph) {
const int kNumPartitions[] = {2, 3, 4, 5, 6, 7};
for (const int num_partitions : kNumPartitions) {
SCOPED_TRACE(absl::StrCat("num_partitions = ", num_partitions));
WeightedBronKerboschBitsetAlgorithm algo;

const int num_nodes = num_partitions * num_partitions;
algo.Initialize(num_nodes);

for (int i = 0; i < num_nodes; ++i) {
for (int j = i + 1; j < num_nodes; ++j) {
if (FullKPartiteGraph(num_partitions, i, j)) algo.AddEdge(i, j);
}
}

std::vector<std::vector<int>> cliques = algo.Run();
EXPECT_EQ(cliques.size(), pow(num_partitions, num_partitions));
for (const std::vector<int>& clique : cliques) {
EXPECT_EQ(num_partitions, clique.size());
}
}
}

TEST(WeightedBronKerboschBitsetAlgorithmTest, ModuloGraph) {
int num_partitions = 50;
int partition_size = 100;
WeightedBronKerboschBitsetAlgorithm algo;

const int num_nodes = num_partitions * partition_size;
algo.Initialize(num_nodes);

for (int i = 0; i < num_nodes; ++i) {
for (int j = i + 1; j < num_nodes; ++j) {
if (ModuloGraph(num_partitions, i, j)) algo.AddEdge(i, j);
}
}

std::vector<std::vector<int>> cliques = algo.Run();
EXPECT_EQ(cliques.size(), num_partitions);
for (const std::vector<int>& clique : cliques) {
EXPECT_EQ(partition_size, clique.size());
}
}

// The following two tests run the Bron-Kerbosch algorithm with wall time
// limit and deterministic time limit. They use a full 15-partite graph with
// a one second time limit.
@@ -590,6 +674,37 @@ BENCHMARK(BM_FindCliquesInModuloGraphWithBronKerboschAlgorithm)
->ArgPair(500, 10)
->ArgPair(1000, 5);

void BM_FindCliquesInModuloGraphWithBitsetBK(benchmark::State& state) {
int num_partitions = state.range(0);
int partition_size = state.range(1);
const int kExpectedNumCliques = num_partitions;
const int kExpectedCliqueSize = partition_size;
const int num_nodes = num_partitions * partition_size;
for (auto _ : state) {
WeightedBronKerboschBitsetAlgorithm algo;
algo.Initialize(num_nodes);
for (int i = 0; i < num_nodes; ++i) {
for (int j = i + 1; j < num_nodes; ++j) {
if (ModuloGraph(num_partitions, i, j)) algo.AddEdge(i, j);
}
}

std::vector<std::vector<int>> cliques = algo.Run();
EXPECT_EQ(cliques.size(), kExpectedNumCliques);
for (const std::vector<int>& clique : cliques) {
EXPECT_EQ(kExpectedCliqueSize, clique.size());
}
}
}

BENCHMARK(BM_FindCliquesInModuloGraphWithBitsetBK)
->ArgPair(5, 1000)
->ArgPair(10, 500)
->ArgPair(50, 100)
->ArgPair(100, 50)
->ArgPair(500, 10)
->ArgPair(1000, 5);

// A benchmark that finds all maximal cliques in a 7-partite graph (a graph
// where the nodes are divided into 7 groups of size 7; each node is connected
// to all nodes in other groups but to no node in the same group). This graph