I would like to do a multi-objective optimization and solve a discrete problem using pymoo. For that, pymoo provides the class IntegerRandomSampling(), as shown on its webpage: https://pymoo.org/customization/discrete.html
As an example, I consider a test application that finds the locations of N individuals on a 2D surface map, where each location is described by an index (an integer), and I want pymoo to generate N random integers representing those locations at each "iteration".
I have noticed two things for which I would be happy to receive the community's opinion:
- the sampling does not seem very random: every time I re-run the test, the N-sized array of random integers follows exactly the same sequence;
- more problematically, at some point (and always the same one, actually) the N-sized array of random integers turns into floats instead of the integers one would expect as the return value of IntegerRandomSampling().
Does anyone know whether this is a known issue or whether I am not doing things properly? Below is a small script reproducing what I am trying to explain. The print(x) instruction in _evaluate() shows the N-sized array of random integers, which is always the same and eventually becomes floats.
Many thanks.
EDIT:
- I understood why the sequence of random integers is always the same: it is initialised from the same seed (the seed=1 passed to minimize() in the script below). Why it turns into floats is still a mystery, though...
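For reference, a quick way to check the seed effect (a small sketch reusing the problem, algorithm and n_gen defined in the script below) is to change or omit the seed passed to minimize():

# Different (or omitted) seed -> a different initial sequence of integer samples
res = minimize(problem, algorithm,
               termination=('n_gen', n_gen),
               seed=None,            # or e.g. seed=2
               save_history=True, verbose=False)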
import numpy as np
from pymoo.core.problem import ElementwiseProblem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.operators.crossover.sbx import SBX
from pymoo.operators.mutation.pm import PM
from pymoo.operators.sampling.rnd import IntegerRandomSampling
class stru:
    def __init__(self):
        self.a = 0
class MaskProblem(ElementwiseProblem):
    def __init__(self, data):
        self.xg = data.xg
        self.yg = data.yg
        self.A = data.A
        self.N = data.N
        # design space: each variable is an integer index into the grid A
        dsxl = np.zeros(self.N, dtype=int)
        dsxu = np.zeros(self.N, dtype=int)
        for j in range(self.N):
            dsxu[j] = int(len(self.xg) * len(self.yg) - 1)
        super().__init__(n_var=self.N,
                         n_obj=self.N - 1,
                         n_constr=0,
                         xl=dsxl,
                         xu=dsxu)
    def _evaluate(self, x, out, *args, **kwargs):
        print(x)
        # map each index to the (x, y) coordinates of the closest entry in A
        p = np.zeros((self.N, 2), dtype=float)
        for k in range(self.N):
            j, i = np.unravel_index(np.abs(self.A - x[k]).argmin(), self.A.shape)
            p[k, 0] = self.xg[i]
            p[k, 1] = self.yg[j]
        # objectives: distance of every other individual to the middle one
        kmid = int(0.5 * (self.N - 1))
        fobj = np.zeros(self.N - 1, dtype=float)
        for k in range(kmid):
            fobj[k] = np.sqrt(np.square(p[k, 0] - p[kmid, 0]) + np.square(p[k, 1] - p[kmid, 1]))
        for k in range(kmid + 1, self.N):  # skip the midpoint itself
            fobj[k - 1] = np.sqrt(np.square(p[k, 0] - p[kmid, 0]) + np.square(p[k, 1] - p[kmid, 1]))
        # print(f'x={x[0][0]}, i={i}, j={j}, A(i,j)={data.A[j, i]}')
        # objectives array
        out["F"] = fobj
data = stru()

# regular grid of candidate locations
xmin = 0.0
xmax = 1.0
NX = 3
data.xg = np.linspace(xmin, xmax, NX)
ymin = 0.0
ymax = 1.0
NY = 5
data.yg = np.linspace(ymin, ymax, NY)

# integer labels of the grid cells: A[jy, jx] = jy*NX + jx
data.A = np.zeros((NY, NX), dtype=int)
for jx in range(NX):
    for jy in range(NY):
        data.A[jy, jx] = jy * NX + jx
print(data.A)

data.N = 3

n_population = 40
n_offsprings = 10
n_gen = 100
crossover_prob = 0.9
crossover_eta = 15
mutation_eta = 20

problem = MaskProblem(data)

algorithm = NSGA2(
    pop_size=n_population,
    n_offsprings=n_offsprings,
    sampling=IntegerRandomSampling(),
    crossover=SBX(prob=crossover_prob, eta=crossover_eta),
    mutation=PM(eta=mutation_eta),
    eliminate_duplicates=True
)

from pymoo.optimize import minimize

res = minimize(problem,
               algorithm,
               termination=('n_gen', n_gen),
               seed=1,
               save_history=True,
               verbose=False)
In order to keep your data type as integer, you can explicitly specify the type via the vtype argument.
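For example, here is a minimal sketch of the two changes to the script above (assuming pymoo 0.6.x, where the Problem constructor accepts vtype and where the discrete-customization page linked in the question combines SBX/PM with RoundingRepair):

# In MaskProblem.__init__: declare the design variables as integers
super().__init__(n_var=self.N,
                 n_obj=self.N - 1,
                 n_constr=0,
                 xl=dsxl,
                 xu=dsxu,
                 vtype=int)

# Round the real-valued SBX/PM offspring back to integers
from pymoo.operators.repair.rounding import RoundingRepair

algorithm = NSGA2(
    pop_size=n_population,
    n_offsprings=n_offsprings,
    sampling=IntegerRandomSampling(),
    crossover=SBX(prob=crossover_prob, eta=crossover_eta,
                  vtype=float, repair=RoundingRepair()),
    mutation=PM(eta=mutation_eta, vtype=float, repair=RoundingRepair()),
    eliminate_duplicates=True
)

Without the repair, only the initial population comes from IntegerRandomSampling(); the SBX and PM operators then work on real values and hand back floats, which matches the behaviour observed in the question.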