
Commit 4b302e6

make format

1 parent 48ec3aa commit 4b302e6

4 files changed (+56, -60 lines)
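All four diffs below are whitespace-only changes produced by the formatter: spaces after control-flow keywords, "() {" brace spacing, and doc-comment re-indentation. As a rough sketch, rules of this kind in a .clang-format file would yield exactly these edits; the option keys are real clang-format options, but the values shown are assumptions, not necessarily VTR's checked-in configuration:

# Hypothetical .clang-format excerpt -- not VTR's actual config.
BasedOnStyle: LLVM
IndentWidth: 4                       # 4-space indentation, as in the hunks below
SpaceBeforeParens: ControlStatements # turns "if(" / "for(" into "if (" / "for ("
BreakBeforeBraces: Attach            # keeps "{" on the same line, with a space before it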

vpr/src/place/RL_agent_util.cpp

Lines changed: 5 additions & 5 deletions

@@ -117,20 +117,20 @@ void update_move_generator(std::unique_ptr<MoveGenerator>& move_generator, std::
     }
 }
 
-void determine_agent_block_types(){
+void determine_agent_block_types() {
     //Loop through all available logical block types and store the ones that exist in the netlist
     auto& device_ctx = g_vpr_ctx.device();
     auto& cluster_ctx = g_vpr_ctx.clustering();
     auto& place_ctx = g_vpr_ctx.mutable_placement();
     int agent_type_index = 0;
     for (auto itype : device_ctx.logical_block_types) {
         if (itype.index == 0) //ignore empty type
-        continue;
+            continue;
         auto blk_per_type = cluster_ctx.clb_nlist.blocks_per_type(itype);
         if (blk_per_type.size() != 0) {
-        place_ctx.phys_blk_type_to_agent_blk_type_map.insert(std::pair<int, int>(agent_type_index, itype.index));
-        place_ctx.agent_blk_type_to_phys_blk_type_map.insert(std::pair<int, int>(itype.index, agent_type_index));
-        agent_type_index++;
+            place_ctx.phys_blk_type_to_agent_blk_type_map.insert(std::pair<int, int>(agent_type_index, itype.index));
+            place_ctx.agent_blk_type_to_phys_blk_type_map.insert(std::pair<int, int>(itype.index, agent_type_index));
+            agent_type_index++;
         }
     }
 }
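For context on the reformatted function: determine_agent_block_types() builds two std::map lookups that translate between physical logical-block-type indices and the agent's compact action indices, skipping the EMPTY type (index 0) and any type with no blocks in the netlist. A minimal standalone sketch of that mapping; the struct and data here are illustrative stand-ins, not VPR APIs:

// Standalone sketch of the mapping determine_agent_block_types() builds.
// "BlockTypeInfo" and its fields are hypothetical stand-ins, not VPR types.
#include <cstdio>
#include <map>
#include <vector>

struct BlockTypeInfo {
    int index;       // physical logical-block-type index (0 == EMPTY)
    int block_count; // how many netlist blocks use this type
};

int main() {
    std::vector<BlockTypeInfo> types = {{0, 0}, {1, 120}, {2, 0}, {3, 8}};
    std::map<int, int> agent_to_phys; // compact agent index -> physical index
    std::map<int, int> phys_to_agent; // physical index -> compact agent index

    int agent_type_index = 0;
    for (const auto& t : types) {
        if (t.index == 0) // ignore the EMPTY type
            continue;
        if (t.block_count != 0) { // keep only types that occur in the netlist
            agent_to_phys.insert({agent_type_index, t.index});
            phys_to_agent.insert({t.index, agent_type_index});
            agent_type_index++;
        }
    }
    // Physical types 1 and 3 get agent indices 0 and 1; type 2 is skipped.
    for (const auto& [agent_idx, phys_idx] : agent_to_phys)
        std::printf("agent %d -> physical %d\n", agent_idx, phys_idx);
}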

vpr/src/place/place.cpp

Lines changed: 1 addition & 1 deletion

@@ -3224,7 +3224,7 @@ static void print_placement_move_types_stats(
     float moves, accepted, rejected, aborted;
 
     float total_moves = 0;
-    for(size_t iaction = 0; iaction < move_type_stat.blk_type_moves.size(); iaction++){
+    for (size_t iaction = 0; iaction < move_type_stat.blk_type_moves.size(); iaction++) {
        total_moves += move_type_stat.blk_type_moves[iaction];
    }
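The reformatted loop totals the per-action move counts so the per-type statistics can be reported as shares of all moves. A hedged, self-contained illustration of that kind of normalization; a simplified stand-in, not the actual body of print_placement_move_types_stats():

// Hypothetical simplification: report each move type's share of all moves.
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> blk_type_moves = {150.f, 45.f, 5.f}; // illustrative counts
    float total_moves = 0;
    for (size_t iaction = 0; iaction < blk_type_moves.size(); iaction++) {
        total_moves += blk_type_moves[iaction];
    }
    for (size_t iaction = 0; iaction < blk_type_moves.size(); iaction++) {
        std::printf("action %zu: %.1f%% of moves\n", iaction,
                    100.f * blk_type_moves[iaction] / total_moves);
    }
}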
32303230

vpr/src/place/simpleRL_move_generator.cpp

Lines changed: 6 additions & 7 deletions

@@ -82,12 +82,12 @@ void KArmedBanditAgent::process_outcome(double reward, e_reward_function reward_
     //write agent internal q-table and actions into a file for debugging purposes
     //agent_info_file_ variable is a NULL pointer by default
     //info file is not generated unless the agent_info_file_ set to a filename in "init_q_scores" function
-    if(agent_info_file_) {
+    if (agent_info_file_) {
         write_agent_info(last_action_, reward);
     }
 }
 
-void KArmedBanditAgent::write_agent_info(int last_action, double reward){
+void KArmedBanditAgent::write_agent_info(int last_action, double reward) {
     fseek(agent_info_file_, 0, SEEK_END);
     fprintf(agent_info_file_, "%d,", last_action);
     fprintf(agent_info_file_, "%g,", reward);

@@ -134,9 +134,9 @@ void EpsilonGreedyAgent::init_q_scores() {
 
     //agent_info_file_ = vtr::fopen("agent_info.txt", "w");
     //write agent internal q-table and actions into file for debugging purposes
-    if(agent_info_file_) {
+    if (agent_info_file_) {
         //we haven't performed any moves yet, hence last_aciton and reward are 0
-        write_agent_info(0,0);
+        write_agent_info(0, 0);
     }
 
     set_epsilon_action_prob();

@@ -255,9 +255,9 @@ void SoftmaxAgent::init_q_scores() {
 
     // agent_info_file_ = vtr::fopen("agent_info.txt", "w");
    //write agent internal q-table and actions into file for debugging purposes
-    if(agent_info_file_) {
+    if (agent_info_file_) {
         //we haven't performed any moves yet, hence last_aciton and reward are 0
-        write_agent_info(0,0);
+        write_agent_info(0, 0);
     }
 
     /*

@@ -269,7 +269,6 @@ void SoftmaxAgent::init_q_scores() {
         set_block_ratio();
     }
     set_action_prob();
-
 }
 
 t_propose_action SoftmaxAgent::propose_action() {
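For context on the logging touched above: agent_info_file_ is a NULL pointer by default, so no debug file is produced unless init_q_scores() opens one (e.g., by uncommenting the vtr::fopen("agent_info.txt", "w") line shown in the diff), after which write_agent_info() appends one CSV row per processed outcome. A standalone sketch of that opt-in append pattern, using plain stdio and an illustrative row layout rather than VPR's exact format:

// Standalone sketch of the write_agent_info() debug-logging pattern:
// append one CSV row (action, reward) per processed outcome.
#include <cstdio>

static FILE* agent_info_file = nullptr; // NULL by default: logging disabled

void write_agent_info(int last_action, double reward) {
    if (!agent_info_file)
        return;
    fseek(agent_info_file, 0, SEEK_END); // always append at the end
    fprintf(agent_info_file, "%d,%g\n", last_action, reward);
}

int main() {
    agent_info_file = fopen("agent_info.txt", "w"); // opt-in, as in init_q_scores()
    write_agent_info(0, 0); // before any move, last action and reward are 0
    write_agent_info(3, -0.25);
    fclose(agent_info_file);
}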

vpr/src/place/simpleRL_move_generator.h

Lines changed: 44 additions & 47 deletions

@@ -17,27 +17,27 @@ class KArmedBanditAgent {
     virtual ~KArmedBanditAgent() {}
 
     /**
-    * @brief Choose a move type to perform and a block type that move should be performed with based on Q-table
-    *
-    * @return A move type and a block type as a "t_propose_action" struct
-    * If the agent is set to only propose move type, then block type index in the struct will be set to -1
-    */
+     * @brief Choose a move type to perform and a block type that move should be performed with based on Q-table
+     *
+     * @return A move type and a block type as a "t_propose_action" struct
+     * If the agent is set to only propose move type, then block type index in the struct will be set to -1
+     */
     virtual t_propose_action propose_action() = 0;
 
     /**
-    * @brief Update the agent Q-table based on the reward received by the SA algorithm
-    *
-    * @param reward A double value calculated in "place.cpp" file showing how placement cost was affected by the prior action taken
-    * @param reward_func The reward function used by the agent, detail explanation can be found on "directed_moves_util.h" file
-    */
+     * @brief Update the agent Q-table based on the reward received by the SA algorithm
+     *
+     * @param reward A double value calculated in "place.cpp" file showing how placement cost was affected by the prior action taken
+     * @param reward_func The reward function used by the agent, detail explanation can be found on "directed_moves_util.h" file
+     */
     void process_outcome(double, e_reward_function);
 
     /**
-    * @brief write all agent internal information (Q-table, reward for each performed action, ...) to a file (agent_info_file_)
-    *
-    * @param last_action Last action performed by the RL-agent
-    * @param reward A double value calculated in "place.cpp" file showing how placement cost was affected by the prior action taken
-    */
+     * @brief write all agent internal information (Q-table, reward for each performed action, ...) to a file (agent_info_file_)
+     *
+     * @param last_action Last action performed by the RL-agent
+     * @param reward A double value calculated in "place.cpp" file showing how placement cost was affected by the prior action taken
+     */
     void write_agent_info(int last_action, double reward);
 
   protected:

@@ -72,32 +72,31 @@ class EpsilonGreedyAgent : public KArmedBanditAgent {
     t_propose_action propose_action() override; //Returns the type of the next action as well as the block type the agent wishes to perform
 
   public:
-
     /**
-    * @brief Set the user-specified epsilon for the E-greedy agent
-    *
-    * @param epsilon Epsilon value for the agent, can be specified by the command-line option "--place_agent_epsilon"
-    * Epsilon default value is 0.3.
-    */
+     * @brief Set the user-specified epsilon for the E-greedy agent
+     *
+     * @param epsilon Epsilon value for the agent, can be specified by the command-line option "--place_agent_epsilon"
+     * Epsilon default value is 0.3.
+     */
     void set_epsilon(float epsilon);
 
     /**
-    * @brief Set equal action probability to all available actions.
-    */
+     * @brief Set equal action probability to all available actions.
+     */
     void set_epsilon_action_prob();
 
     /**
-    * @brief Set step size for q-table updates
-    *
-    * @param gamma Controls how quickly the agent's memory decays, can be specified by the command-line option "--place_agent_gamma"
-    * Gamma default value is 0.05.
-    * @param move_lim Number of moves per temperature
-    */
+     * @brief Set step size for q-table updates
+     *
+     * @param gamma Controls how quickly the agent's memory decays, can be specified by the command-line option "--place_agent_gamma"
+     * Gamma default value is 0.05.
+     * @param move_lim Number of moves per temperature
+     */
     void set_step(float gamma, int move_lim);
 
     /**
-    * @brief Initialize agent's Q-table and internal variable to zero (RL-agent learns everything throughout the placement run and has no prior knowledge)
-    */
+     * @brief Initialize agent's Q-table and internal variable to zero (RL-agent learns everything throughout the placement run and has no prior knowledge)
+     */
     void init_q_scores();
 
   private:

@@ -122,32 +122,30 @@ class SoftmaxAgent : public KArmedBanditAgent {
     t_propose_action propose_action() override; //Returns the type of the next action as well as the block type the agent wishes to perform
 
   public:
-
     /**
-    * @brief Calculate the fraction of total netlist blocks for each agent block type and will be used by the "set_action_prob" function.
-    */
+     * @brief Calculate the fraction of total netlist blocks for each agent block type and will be used by the "set_action_prob" function.
+     */
     void set_block_ratio();
 
-
     /**
-    * @brief Set action probability for all available actions.
-    * If agent only proposes move type, the action probabilities would be equal for all move types at the beginning.
-    * If agent proposes both move and block type, the action_prob for each action would be based on its block type count in the netlist.
-    */
+     * @brief Set action probability for all available actions.
+     * If agent only proposes move type, the action probabilities would be equal for all move types at the beginning.
+     * If agent proposes both move and block type, the action_prob for each action would be based on its block type count in the netlist.
+     */
     void set_action_prob();
 
     /**
-    * @brief Set step size for q-table updates
-    *
-    * @param gamma Controls how quickly the agent's memory decays, can be specified by the command-line option "--place_agent_gamma"
-    * Gamma default value is 0.05.
-    * @param move_lim Number of moves per temperature
-    */
+     * @brief Set step size for q-table updates
+     *
+     * @param gamma Controls how quickly the agent's memory decays, can be specified by the command-line option "--place_agent_gamma"
+     * Gamma default value is 0.05.
+     * @param move_lim Number of moves per temperature
+     */
     void set_step(float gamma, int move_lim);
 
     /**
-    * @brief Initialize agent's Q-table and internal variable to zero (RL-agent learns everything throughout the placement run and has no prior knowledge)
-    */
+     * @brief Initialize agent's Q-table and internal variable to zero (RL-agent learns everything throughout the placement run and has no prior knowledge)
+     */
     void init_q_scores();
 
   private:
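The doc comments in this header describe a standard k-armed bandit interface: propose_action() picks a (move type, block type) action from the Q-table, process_outcome() nudges that action's Q-value toward the observed reward, and epsilon/gamma tune exploration and memory decay. A minimal epsilon-greedy sketch of that loop; an illustration of the general technique under those assumptions (class name and update rule are mine, not VPR's exact implementation):

// Minimal epsilon-greedy k-armed bandit sketch of the API documented above.
// "EpsilonGreedySketch" is hypothetical; VPR's agents keep more state.
#include <cstdlib>
#include <ctime>
#include <vector>

class EpsilonGreedySketch {
  public:
    EpsilonGreedySketch(size_t num_actions, float epsilon, float step)
        : epsilon_(epsilon), step_(step), q_(num_actions, 0.f) {}

    // propose_action(): explore with probability epsilon, otherwise exploit.
    size_t propose_action() {
        float r = static_cast<float>(std::rand()) / RAND_MAX;
        if (r < epsilon_)
            return std::rand() % q_.size(); // random exploration
        size_t best = 0;
        for (size_t a = 1; a < q_.size(); a++)
            if (q_[a] > q_[best]) best = a;
        return best; // greedy exploitation
    }

    // process_outcome(): move the chosen action's Q-value toward the reward;
    // a larger step forgets old rewards faster ("memory decay").
    void process_outcome(size_t action, double reward) {
        q_[action] += step_ * (static_cast<float>(reward) - q_[action]);
    }

  private:
    float epsilon_;        // exploration probability (default 0.3 in VPR)
    float step_;           // update step size; VPR derives it from gamma and move_lim
    std::vector<float> q_; // one Q-value per (move type, block type) action
};

int main() {
    std::srand(static_cast<unsigned>(std::time(nullptr)));
    EpsilonGreedySketch agent(/*num_actions=*/7, /*epsilon=*/0.3f, /*step=*/0.1f);
    size_t a = agent.propose_action();
    agent.process_outcome(a, /*reward=*/1.0); // reward is computed by the annealer
}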
