forked from ahmedfgad/GeneticAlgorithmPython
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_stop_criteria.py
More file actions
174 lines (147 loc) · 7.64 KB
/
test_stop_criteria.py
File metadata and controls
174 lines (147 loc) · 7.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
import pygad
import numpy
# Counters for fitness-call bookkeeping (currently unused in this module;
# presumably kept for parity with sibling test modules — TODO confirm).
actual_num_fitness_calls_default_keep = 0
actual_num_fitness_calls_no_keep = 0
actual_num_fitness_calls_keep_elitism = 0
actual_num_fitness_calls_keep_parents = 0

# GA configuration shared by every scenario below.
num_generations = 50
sol_per_pop = 10
num_parents_mating = 5

# Two linear functions define the (multi-)objective problem: the GA searches
# for gene vectors whose weighted sums approach the desired outputs.
function_inputs1 = [4,-2,3.5,5,-11,-4.7] # Function 1 inputs.
function_inputs2 = [-2,0.7,-9,1.4,3,5] # Function 2 inputs.
desired_output1 = 50 # Function 1 output.
desired_output2 = 30 # Function 2 output.

#### Define the fitness functions at the top level of the module so that they are picklable and usable with process-based parallel processing.
#### If the functions are defined inside a class/method/function, they are not picklable and this error is raised: AttributeError: Can't pickle local object
#### Process-based parallel processing requires the used functions to be picklable.
def fitness_func_batch_multi(ga_instance, solution, solution_idx):
    """Batched multi-objective fitness.

    `solution` is a batch (iterable of gene vectors); returns one
    [fitness1, fitness2] pair per candidate, where each fitness is the
    inverse distance of the weighted sum from the desired output.
    """
    fitnesses = []
    for candidate in solution:
        out1 = numpy.sum(candidate*function_inputs1)
        out2 = numpy.sum(candidate*function_inputs2)
        score1 = 1.0 / (numpy.abs(out1 - desired_output1) + 0.000001)
        score2 = 1.0 / (numpy.abs(out2 - desired_output2) + 0.000001)
        fitnesses.append([score1, score2])
    return fitnesses
def fitness_func_no_batch_multi(ga_instance, solution, solution_idx):
    """Multi-objective fitness for a single solution.

    Returns [fitness1, fitness2]: the inverse distances of the two
    weighted sums from their respective desired outputs.
    """
    out1 = numpy.sum(solution*function_inputs1)
    out2 = numpy.sum(solution*function_inputs2)
    score1 = 1.0 / (numpy.abs(out1 - desired_output1) + 0.000001)
    score2 = 1.0 / (numpy.abs(out2 - desired_output2) + 0.000001)
    return [score1, score2]
def fitness_func_batch_single(ga_instance, solution, solution_idx):
    """Batched single-objective fitness.

    `solution` is a batch (iterable of gene vectors); returns one scalar
    fitness per candidate.

    Bug fix: the original computed `numpy.sum(solution*function_inputs1)`
    inside the loop — using the whole batch `solution` instead of the
    current candidate `sol` — so every entry in the returned list was the
    same (incorrect) value. This mirrors fitness_func_batch_multi, which
    correctly iterates per candidate.
    """
    f = []
    for sol in solution:
        output = numpy.sum(sol*function_inputs1)
        fitness = 1.0 / (numpy.abs(output - desired_output1) + 0.000001)
        f.append(fitness)
    return f
def fitness_func_no_batch_single(ga_instance, solution, solution_idx):
    """Single-objective fitness for one solution: inverse distance of the
    weighted gene sum from the desired output."""
    weighted_sum = numpy.sum(solution*function_inputs1)
    return 1.0 / (numpy.abs(weighted_sum - desired_output1) + 0.000001)
def multi_objective_problem(keep_elitism=1,
                            keep_parents=-1,
                            fitness_batch_size=None,
                            stop_criteria=None,
                            parent_selection_type='sss',
                            mutation_type="random",
                            mutation_percent_genes="default",
                            multi_objective=False,
                            parallel_processing=None):
    """Build and run a pygad.GA instance for one test scenario.

    Selects the appropriate module-level fitness function based on whether
    the run is multi-objective and whether fitness evaluation is batched,
    then runs the GA to completion.

    Parameters mirror the pygad.GA constructor arguments of the same names;
    `multi_objective` switches between the single- and multi-objective
    fitness functions.

    Raises:
        ValueError: if `fitness_batch_size` is neither None nor a positive
            integer. (The original code silently fell through its if/elif
            chain in that case and later crashed with UnboundLocalError on
            `fitness_func`; failing fast gives a clear message instead.)

    Returns:
        None
    """
    if fitness_batch_size is None or (type(fitness_batch_size) in pygad.GA.supported_int_types and fitness_batch_size == 1):
        # No batching: fitness is evaluated one solution at a time.
        if multi_objective == True:
            fitness_func = fitness_func_no_batch_multi
        else:
            fitness_func = fitness_func_no_batch_single
    elif (type(fitness_batch_size) in pygad.GA.supported_int_types and fitness_batch_size > 1):
        # Batched: the fitness function receives a batch of solutions.
        if multi_objective == True:
            fitness_func = fitness_func_batch_multi
        else:
            fitness_func = fitness_func_batch_single
    else:
        raise ValueError(
            f"fitness_batch_size must be None or a positive integer, got {fitness_batch_size!r}")

    ga_optimizer = pygad.GA(num_generations=num_generations,
                            sol_per_pop=sol_per_pop,
                            num_genes=6,
                            num_parents_mating=num_parents_mating,
                            fitness_func=fitness_func,
                            fitness_batch_size=fitness_batch_size,
                            mutation_type=mutation_type,
                            mutation_percent_genes=mutation_percent_genes,
                            keep_elitism=keep_elitism,
                            keep_parents=keep_parents,
                            stop_criteria=stop_criteria,
                            parent_selection_type=parent_selection_type,
                            parallel_processing=parallel_processing,
                            suppress_warnings=True)
    ga_optimizer.run()
    return None
def test_number_calls_fitness_function_no_parallel_processing():
    """Single-objective, unbatched, serial fitness evaluation."""
    multi_objective_problem(parallel_processing=None,
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_thread_1():
    """Single-objective, unbatched, thread pool of 1 worker."""
    multi_objective_problem(parallel_processing=['thread', 1],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_thread_2():
    """Single-objective, unbatched, thread pool of 2 workers."""
    multi_objective_problem(parallel_processing=['thread', 2],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_thread_5():
    """Single-objective, unbatched, thread pool of 5 workers."""
    multi_objective_problem(parallel_processing=['thread', 5],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_thread_5_patch_4():
    """Single-objective, batch size 4, thread pool of 5 workers."""
    multi_objective_problem(parallel_processing=['thread', 5],
                            fitness_batch_size=4,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_thread_5_patch_4_multi_objective():
    """Multi-objective, batch size 4, thread pool of 5 workers."""
    multi_objective_problem(parallel_processing=['thread', 5],
                            fitness_batch_size=4,
                            multi_objective=True)
def test_number_calls_fitness_function_parallel_processing_process_1():
    """Single-objective, unbatched, process pool of 1 worker."""
    multi_objective_problem(parallel_processing=['process', 1],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_process_2():
    """Single-objective, unbatched, process pool of 2 workers."""
    multi_objective_problem(parallel_processing=['process', 2],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_process_5():
    """Single-objective, unbatched, process pool of 5 workers."""
    multi_objective_problem(parallel_processing=['process', 5],
                            fitness_batch_size=None,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_process_5_patch_4():
    """Single-objective, batch size 4, process pool of 5 workers."""
    multi_objective_problem(parallel_processing=['process', 5],
                            fitness_batch_size=4,
                            multi_objective=False)
def test_number_calls_fitness_function_parallel_processing_process_5_patch_4_multi_objective():
    """Multi-objective, batch size 4, process pool of 5 workers."""
    multi_objective_problem(parallel_processing=['process', 5],
                            fitness_batch_size=4,
                            multi_objective=True)
if __name__ == "__main__":
    # Run every scenario in order — serial first, then thread-based, then
    # process-based parallel processing — with a blank line between runs.
    # (Note: the original's second section comment said "Thread-based" but
    # those calls exercise process-based pools.)
    scenarios = [
        test_number_calls_fitness_function_no_parallel_processing,
        #### Thread-based Parallel Processing
        test_number_calls_fitness_function_parallel_processing_thread_1,
        test_number_calls_fitness_function_parallel_processing_thread_2,
        test_number_calls_fitness_function_parallel_processing_thread_5,
        test_number_calls_fitness_function_parallel_processing_thread_5_patch_4,
        test_number_calls_fitness_function_parallel_processing_thread_5_patch_4_multi_objective,
        #### Process-based Parallel Processing
        test_number_calls_fitness_function_parallel_processing_process_1,
        test_number_calls_fitness_function_parallel_processing_process_2,
        test_number_calls_fitness_function_parallel_processing_process_5,
        test_number_calls_fitness_function_parallel_processing_process_5_patch_4,
        test_number_calls_fitness_function_parallel_processing_process_5_patch_4_multi_objective,
    ]
    print()
    for scenario in scenarios:
        scenario()
        print()