// C++ code
#include <algorithm>  // std::min
#include <cstdint>
#include <cstdio>     // scanf / printf / puts
#include <deque>      // std::deque edge storage
#include <iostream>
#include <queue>
#include <utility>
#include <vector>
namespace neural_network {
// Initial reserve size for the output-status vector (avoids small regrowths).
constexpr uint64_t kVecInitCapacity = 16;
// A directed edge stored as a node of an intrusive singly linked list:
// each source vertex keeps a head pointer (Vertex::edge_) and its outgoing
// edges chain through next_.
struct Edge {
explicit Edge(const uint64_t v1, const int64_t weight, Edge* const next)
: v1_(v1), weight_(weight), next_(next) {
}
uint64_t v1_;  // destination vertex index
int64_t weight_;  // signed edge weight
Edge* next_;  // next edge leaving the same source vertex (nullptr = end)
};
struct Edge;  // forward declaration; full definition precedes this struct

// A neuron: activation status, firing threshold, degree counters, and the
// head of its intrusive outgoing-edge list.
struct Vertex {
  Vertex()
      : status_(0), threshold_(0), inDegree_(0), outDegree_(0), edge_(nullptr) {
  }
  // Effective output value of this neuron. Input-layer neurons (no incoming
  // edges) emit their given status as-is; all others subtract the threshold.
  int64_t calcFinalStatus() const {
    if (inDegree_ == 0) {
      return status_;
    }
    return status_ - threshold_;
  }
  int64_t status_;
  int64_t threshold_;
  uint64_t inDegree_;
  uint64_t outDegree_;
  Edge* edge_;
};
// Directed graph of neurons. Vertices are indexed 0..VertexSize()-1; edges
// are owned by edgeVec_ (a deque, so element addresses stay stable as it
// grows) and linked into per-vertex intrusive adjacency lists.
class NeuralNetwork {
public:
// Pre-sizes the vertex array; all vertices start zero-initialized.
explicit NeuralNetwork(const uint64_t vertexSize)
: vertexVec_(vertexSize) {
}
uint64_t VertexSize() const {
return vertexVec_.size();
}
uint64_t EdgeSize() const {
return edgeVec_.size();
}
// Sets the initial status and threshold of vertex v0 (0-based).
void SetVertex(const uint64_t v0, const int64_t status, const int64_t threshold);
// Appends a directed edge v0 -> v1 with the given weight.
void Push(const uint64_t v0, const uint64_t v1, const int64_t weight);
// Kahn topological order; shorter than VertexSize() if the graph is cyclic.
std::vector<uint64_t> TopologicalSort() const;
// Propagates statuses through the network (mutates vertex statuses) and
// returns (vertex index, final status) for firing output-layer vertices.
std::vector<std::pair<uint64_t, int64_t>> CalcOutputLayerStatus();
private:
std::vector<Vertex> vertexVec_;
std::deque<Edge> edgeVec_;  // deque keeps Edge* links valid on growth
};
} // namespace neural_network
namespace neural_network {
void NeuralNetwork::SetVertex(const uint64_t v0, const int64_t status, const int64_t threshold) {
Vertex &vertex0 = vertexVec_.at(v0);
vertex0.status_ = status;
vertex0.threshold_ = threshold;
}
void NeuralNetwork::Push(const uint64_t v0, const uint64_t v1, const int64_t weight) {
Vertex &vertex0 = vertexVec_.at(v0);
edgeVec_.emplace_back(v1, weight, vertex0.edge_);
vertex0.edge_ = &edgeVec_.back();
++(vertex0.outDegree_);
Vertex &vertex1 = vertexVec_.at(v1);
++(vertex1.inDegree_);
}
std::vector<uint64_t> NeuralNetwork::TopologicalSort() const {
const uint64_t vertexSize = VertexSize();
std::vector<uint64_t> resVec;
resVec.reserve(vertexSize);
std::vector<uint64_t> inDegreeVec(vertexSize);
for (uint64_t v0 = 0; v0 < vertexSize; ++v0) {
const Vertex &vertex0 = vertexVec_.at(v0);
inDegreeVec.at(v0) = vertex0.inDegree_;
if (vertex0.inDegree_ == 0) {
resVec.emplace_back(v0);
}
}
uint64_t vertexQueueIndex = 0;
while (vertexQueueIndex < resVec.size()) {
const uint64_t v0 = resVec.at(vertexQueueIndex);
++vertexQueueIndex;
const Vertex &vertex0 = vertexVec_.at(v0);
for (const Edge *e1 = vertex0.edge_; e1 != nullptr; e1 = e1->next_) {
const uint64_t v1 = e1->v1_;
const uint64_t inDegree = --inDegreeVec[v1];
if (inDegree == 0) {
resVec.emplace_back(v1);
}
}
}
return resVec;
}
std::vector<std::pair<uint64_t, int64_t>> NeuralNetwork::CalcOutputLayerStatus() {
const uint64_t vertexSize = VertexSize();
std::vector<std::pair<uint64_t, int64_t>> resVec;
resVec.reserve(std::min(kVecInitCapacity, vertexSize));
std::vector<uint64_t> vertexSortVec = TopologicalSort();
if (vertexSize != vertexSortVec.size()) {
return resVec;
}
for (const uint64_t v0 : vertexSortVec) {
const Vertex &vertex0 = vertexVec_.at(v0);
const int64_t vertex0Status = vertex0.calcFinalStatus();
if (vertex0Status > 0) {
for (const Edge *e1 = vertex0.edge_; e1 != nullptr; e1 = e1->next_) {
Vertex &vertex1 = vertexVec_.at(e1->v1_);
vertex1.status_ += vertex0Status * e1->weight_;
}
}
}
for (uint64_t v0 = 0; v0 < vertexSize; ++v0) {
const Vertex &vertex0 = vertexVec_.at(v0);
if (vertex0.outDegree_ == 0) {
const int64_t vertex0Status = vertex0.calcFinalStatus();
if (vertex0Status > 0) {
resVec.emplace_back(v0, vertex0Status);
}
}
}
return resVec;
}
} // namespace neural_network
// Reads the network (1-based vertex ids in the edge list), runs forward
// propagation, and prints "id status" per firing output vertex, or "NULL"
// when no output-layer vertex fires.
//
// Fix: the original used scanf("%lu"/"%ld") with uint64_t/int64_t, which is
// undefined behavior on platforms where those types are (unsigned) long long
// (e.g. 64-bit Windows, 32-bit Linux). iostream extraction is type-safe and
// needs no format specifiers.
int main(void) {
  std::ios_base::sync_with_stdio(false);  // bulk I/O: drop C-stdio sync
  std::cin.tie(nullptr);
  uint64_t vertexSize = 0, edgeSize = 0;
  if (!(std::cin >> vertexSize >> edgeSize)) {
    return 1;  // malformed or missing header line
  }
  neural_network::NeuralNetwork neuralNetwork(vertexSize);
  for (uint64_t v0 = 0; v0 < vertexSize; ++v0) {
    int64_t status = 0, threshold = 0;
    std::cin >> status >> threshold;
    neuralNetwork.SetVertex(v0, status, threshold);
  }
  for (uint64_t e1 = 0; e1 < edgeSize; ++e1) {
    uint64_t v0 = 0, v1 = 0;
    int64_t weight = 0;
    std::cin >> v0 >> v1 >> weight;
    neuralNetwork.Push(v0 - 1, v1 - 1, weight);  // convert to 0-based
  }
  const std::vector<std::pair<uint64_t, int64_t>> outputLayerStatuses =
      neuralNetwork.CalcOutputLayerStatus();
  if (outputLayerStatuses.empty()) {
    std::cout << "NULL\n";
  } else {
    for (const auto &entry : outputLayerStatuses) {
      std::cout << entry.first + 1 << ' ' << entry.second << '\n';
    }
  }
  return 0;
}
// Forum comments preserved from the original paste (translated):
// 就是喜欢AcWing有人点赞 — "Just love getting upvotes on AcWing."
// 哥们儿,给你个小建议,以写工程项目的方式刷题不算太明智阿。还是注重算法本身吧。
//   — "Friendly tip: solving contest problems in full engineering style isn't
//      the wisest approach; focus on the algorithm itself."
// 谢谢,我主要是顺便练习一下公司的编程规范。
//   — "Thanks — I'm mainly using this to practice my company's coding standard."
// 哈哈! — "Haha!"