Source code for rbfopt_test_functions

"""Test functions.

This module implements several known mathematical functions that can
be used to test RBFOpt.

Licensed under Revised BSD license, see LICENSE.
(C) Copyright Singapore University of Technology and Design 2014.
(C) Copyright International Business Machines Corporation 2017.

"""

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

import sys
import math
import numpy as np
from rbfopt.rbfopt_black_box import RbfoptBlackBox


class branin:
    """
    Branin function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = ((x[1] - (5.1/(4*math.pi*math.pi))*x[0]*x[0] +
                  5/math.pi*x[0] - 6)**2 +
                 10*(1 - 1/(8*math.pi))*math.cos(x[0]) + 10)
        return value

    dimension = 2
    var_lower = np.array([-5, 0])
    var_upper = np.array([10, 15])
    optimum_point = np.array([9.42477796, 2.47499998])
    additional_optima = np.array([[-3.14159265, 12.27500000],
                                  [3.14159265, 2.27500000]])
    optimum_value = 0.397887357729739
    var_type = np.array(['R'] * 2)
# -- end class
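
# Illustrative sketch (not part of the original module): every test class in
# this file exposes the same interface -- ``dimension``, ``var_lower``,
# ``var_upper``, ``var_type``, ``optimum_point``, ``optimum_value`` and a
# classmethod ``evaluate``.  The hypothetical helper below shows one way that
# interface can be exercised, e.g. to sanity-check a class against its
# reported optimum.
def _check_optimum(test_class, tol=1e-4):
    """Return True if evaluate(optimum_point) matches optimum_value."""
    value = test_class.evaluate(test_class.optimum_point)
    return abs(value - test_class.optimum_value) <= tol

# Example: _check_optimum(branin) should return True (within tolerance).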
class hartman3:
    """
    Hartman3 function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 3)
        value = -math.fsum([cls.c[i] *
                            np.exp(-math.fsum([cls.a[j][i]*(x[j] - cls.p[j][i])**2
                                               for j in range(3)]))
                            for i in range(4)])
        return value

    a = [[3.0, 0.1, 3.0, 0.1],
         [10.0, 10.0, 10.0, 10.0],
         [30.0, 35.0, 30.0, 35.0]]
    p = [[0.36890, 0.46990, 0.10910, 0.03815],
         [0.11700, 0.43870, 0.87320, 0.57430],
         [0.26730, 0.74700, 0.55470, 0.88280]]
    c = [1.0, 1.2, 3.0, 3.2]
    dimension = 3
    var_lower = np.array([0, 0, 0])
    var_upper = np.array([1, 1, 1])
    optimum_point = np.array([0.1, 0.55592003, 0.85218259])
    optimum_value = -3.8626347486217725
    var_type = np.array(['R'] * 3)
# -- end class
class hartman6:
    """
    Hartman6 function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        value = -math.fsum([cls.c[i] *
                            np.exp(-math.fsum([cls.a[j][i]*(x[j] - cls.p[j][i])**2
                                               for j in range(6)]))
                            for i in range(4)])
        return value

    a = [[10.00, 0.05, 3.00, 17.00],
         [3.00, 10.00, 3.50, 8.00],
         [17.00, 17.00, 1.70, 0.05],
         [3.50, 0.10, 10.00, 10.00],
         [1.70, 8.00, 17.00, 0.10],
         [8.00, 14.00, 8.00, 14.00]]
    p = [[0.1312, 0.2329, 0.2348, 0.4047],
         [0.1696, 0.4135, 0.1451, 0.8828],
         [0.5569, 0.8307, 0.3522, 0.8732],
         [0.0124, 0.3736, 0.2883, 0.5743],
         [0.8283, 0.1004, 0.3047, 0.1091],
         [0.5886, 0.9991, 0.6650, 0.0381]]
    c = [1.0, 1.2, 3.0, 3.2]
    dimension = 6
    var_lower = np.array([0, 0, 0, 0, 0, 0])
    var_upper = np.array([1, 1, 1, 1, 1, 1])
    optimum_point = np.array([0.20168952, 0.15001069, 0.47687398,
                              0.27533243, 0.31165162, 0.65730054])
    optimum_value = -3.32236801141551
    var_type = np.array(['R'] * 6)
# -- end class
class camel:
    """
    Six-hump Camel function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = ((4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 + x[0]*x[1] +
                 (-4 + 4*x[1]**2)*x[1]**2)
        return value

    dimension = 2
    var_lower = np.array([-3, -2])
    var_upper = np.array([3, 2])
    optimum_point = np.array([0.08984201, -0.7126])
    optimum_value = -1.0316284535
    var_type = np.array(['R'] * 2)
# -- end class
class goldsteinprice:
    """
    Goldstein & Price function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = ((1 + (x[0] + x[1] + 1)**2 *
                  (19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2)) *
                 (30 + (2*x[0] - 3*x[1])**2 *
                  (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2)))
        return value

    dimension = 2
    var_lower = np.array([-2, -2])
    var_upper = np.array([2, 2])
    optimum_point = np.array([0.0, -1.0])
    optimum_value = 3
    var_type = np.array(['R'] * 2)
# -- end class
class shekel5:
    """
    Shekel5 function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 4)
        value = -math.fsum([
            1.0 / math.fsum([math.fsum([(x[i] - cls.a[i][j])**2
                                        for i in range(4)]), cls.c[j]])
            for j in range(5)])
        return value

    a = [[4.0, 1.0, 8.0, 6.0, 3.0],
         [4.0, 1.0, 8.0, 6.0, 7.0],
         [4.0, 1.0, 8.0, 6.0, 3.0],
         [4.0, 1.0, 8.0, 6.0, 7.0]]
    c = [0.1, 0.2, 0.2, 0.4, 0.4]
    dimension = 4
    var_lower = np.array([0, 0, 0, 0])
    var_upper = np.array([10, 10, 10, 10])
    optimum_point = np.array([4, 4, 4, 4])
    optimum_value = -10.1531958509790
    var_type = np.array(['R'] * 4)
# -- end class
class shekel7:
    """
    Shekel7 function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 4)
        value = -math.fsum([
            1.0 / math.fsum([math.fsum([(x[i] - cls.a[i][j])**2
                                        for i in range(4)]), cls.c[j]])
            for j in range(7)])
        return value

    a = [[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0],
         [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0],
         [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0],
         [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0]]
    c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3]
    dimension = 4
    var_lower = np.array([0, 0, 0, 0])
    var_upper = np.array([10, 10, 10, 10])
    optimum_point = np.array([4, 4, 4, 4])
    optimum_value = -10.4028188369303
    var_type = np.array(['R'] * 4)
# -- end class
class shekel10:
    """
    Shekel10 function of the Dixon-Szego test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 4)
        value = -math.fsum([
            1.0 / math.fsum([math.fsum([(x[i] - cls.a[i][j])**2
                                        for i in range(4)]), cls.c[j]])
            for j in range(10)])
        return value

    a = [[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
         [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 5.0, 1.0, 2.0, 3.6],
         [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 3.0, 8.0, 6.0, 7.0],
         [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6]]
    c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
    dimension = 4
    var_lower = np.array([0, 0, 0, 0])
    var_upper = np.array([10, 10, 10, 10])
    optimum_point = np.array([4, 4, 4, 4])
    optimum_value = -10.53628372621960
    var_type = np.array(['R'] * 4)
# -- end class
class ex4_1_1:
    """
    ex4_1_1 function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 1)
        value = (x[0]**6 - (52.0/25.0)*x[0]**5 + (39.0/80.0)*x[0]**4 +
                 (71.0/10.0)*x[0]**3 - (79.0/20.0)*x[0]**2 - x[0] + 1.0/10.0)
        return value

    dimension = 1
    var_lower = np.array([-2])
    var_upper = np.array([11])
    optimum_point = np.array([-1.19131])
    optimum_value = -7.487312360731
    var_type = np.array(['R'])
# -- end class
class ex4_1_2:
    """
    ex4_1_2 function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 1)
        a = [-500, 2.5, 1.666666666, 1.25, 1.0,
             0.8333333, 0.714285714, 0.625, 0.555555555, 1.0,
             -43.6363636, 0.41666666, 0.384615384, 0.357142857, 0.3333333,
             0.3125, 0.294117647, 0.277777777, 0.263157894, 0.25,
             0.238095238, 0.227272727, 0.217391304, 0.208333333, 0.2,
             0.192307692, 0.185185185, 0.178571428, 0.344827586, 0.6666666,
             -15.48387097, 0.15625, 0.1515151, 0.14705882, 0.14285712,
             0.138888888, 0.135135135, 0.131578947, 0.128205128, 0.125,
             0.121951219, 0.119047619, 0.116279069, 0.113636363, 0.1111111,
             0.108695652, 0.106382978, 0.208333333, 0.408163265, 0.8]
        value = math.fsum([a[i]*x[0]**(i+1) for i in range(50)])
        return value

    dimension = 1
    var_lower = np.array([1])
    var_upper = np.array([2])
    optimum_point = np.array([1.09106])
    optimum_value = -663.4993631230575
    var_type = np.array(['R'] * 1)
# -- end class
class ex8_1_1:
    """
    ex8_1_1 function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = np.cos(x[0])*np.sin(x[1]) - x[0]/(x[1]**2 + 1)
        return value

    dimension = 2
    var_lower = np.array([-1, -1])
    var_upper = np.array([2, 1])
    optimum_point = np.array([2.0, 0.105783])
    optimum_value = -2.0218067833
    var_type = np.array(['R'] * 2)
# -- end class
class ex8_1_4:
    """
    ex8_1_4 function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = 12*x[0]**2 - 6.3*x[0]**4 + x[0]**6 - 6*x[0]*x[1] + 6*x[1]**2
        return value

    dimension = 2
    var_lower = np.array([-2, -5])
    var_upper = np.array([4, 2])
    optimum_point = np.array([0.0, 0.0])
    optimum_value = 0.0
    var_type = np.array(['R'] * 2)
# -- end class
class least:
    """
    least function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 3)
        value = ((127 + (-x[1]*np.exp(-5*x[2])) - x[0])**2 +
                 (151 + (-x[1]*np.exp(-3*x[2])) - x[0])**2 +
                 (379 + (-x[1]*np.exp(-x[2])) - x[0])**2 +
                 (421 + (-x[1]*np.exp(5*x[2])) - x[0])**2 +
                 (460 + (-x[1]*np.exp(3*x[2])) - x[0])**2 +
                 (426 + (-x[1]*np.exp(x[2])) - x[0])**2)
        return value

    dimension = 3
    var_lower = np.array([0, -200, -5])
    var_upper = np.array([600, 200, 5])
    optimum_point = np.array([516.651174172, -149.351893696, -0.206642767973])
    optimum_value = 14085.139848928
    var_type = np.array(['R'] * 3)
# -- end class
class rbrock:
    """
    rbrock function of the GlobalLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = 100*(x[1] - x[0]**2)**2 + (1 - x[0])**2
        return value

    dimension = 2
    var_lower = np.array([-10, -10])
    var_upper = np.array([5, 10])
    optimum_point = np.array([1.0, 1.0])
    optimum_value = 0.0
    var_type = np.array(['R'] * 2)
# -- end class
class perm_6:
    """
    perm function of dimension 6 from Arnold Neumaier.
    http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
    We use parameters (6, 60) here.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        beta = 60
        value = math.fsum([
            (math.fsum([((i + 1)**k + beta) * ((x[i]/(i+1))**k - 1)
                        for i in range(6)]))**2
            for k in range(6)]) + 1000
        return value

    dimension = 6
    var_lower = np.array([-6 for i in range(6)])
    var_upper = np.array([6 for i in range(6)])
    optimum_point = np.array([(i+1) for i in range(6)])
    optimum_value = 1000.0
    var_type = np.array(['R'] * 6)
# -- end class
class perm0_8:
    """
    perm0 function of dimension 8 from Arnold Neumaier.
    http://www.mat.univie.ac.at/~neum/glopt/my_problems.html
    We use parameters (8, 100) here.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 8)
        beta = 100
        value = math.fsum([
            (math.fsum([(i + 1 + beta) * (x[i]**k - (1/(i+1))**k)
                        for i in range(8)]))**2
            for k in range(8)]) + 1000
        return value

    dimension = 8
    var_lower = np.array([-1 for i in range(8)])
    var_upper = np.array([1 for i in range(8)])
    optimum_point = np.array([1.0/(i+1) for i in range(8)])
    optimum_value = 1000.0
    var_type = np.array(['R'] * 8)
# -- end class
class schoen_6_1:
    """
    schoen function of dimension 6 with 50 stationary points.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658], [0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235], [0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955], [0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432], [0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504], [0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057], [0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775], [0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800], [0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565], [0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215], [0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935], [0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025], [0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699], [0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127], [0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369], [0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844], [0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397], [0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887], [0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013], [0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410], [0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507], [0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499], [0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539], [0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953], [0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583], [0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609], [0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004], [0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793], [0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879], [0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711], [0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945], [0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558], [0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905], [0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960], [0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788], [0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792], [0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571], [0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120], [0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272], [0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399], [0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333], [0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535], [0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552], [0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451], [0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773], [0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537], [0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335], [0.410275, 0.619158, 0.148428, 0.419225, 0.637412, 0.204038], [0.552701, 0.472723, 0.491747, 0.017922, 0.198525, 0.074668], [0.749510, 0.158720, 0.395476, 0.528285, 0.143614, 0.961610]]) f = np.array( [-1000, -1000, -1000, 672.2, 861.4, 520.9, 121.0, 11.5, 48.2, 702.4, 536.2, 457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1, 303.7, 283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6, 831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1, 802.9, 999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4, 326.6, 585.5, 16.9, 135.9, 224.2, 382.1, 614.6]) dimension = 6 var_lower = np.array([0 for i in range(6)]) var_upper = 
np.array([1 for i in range(6)]) optimum_point = np.array([0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658]) optimum_value = -1000 var_type = np.array(['R'] * 6)
# -- end class
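
# Illustrative sketch (not part of the original module): the schoen_* classes
# all evaluate the same rational form
#     f(x) = sum_i f_i * prod_{j != i} ||x - z_j||^2
#            / sum_i prod_{j != i} ||x - z_j||^2,
# so the value approaches f_i as x approaches the stationary point z_i.
# A hypothetical vectorized equivalent of the double loop used above:
def _schoen_value(x, z, f):
    """Vectorized Schoen evaluation for points z and values f (sketch)."""
    dist = np.sum((x - z)**2, axis=1)
    prods = np.array([np.prod(np.delete(dist, i)) for i in range(len(f))])
    return np.dot(f, prods) / np.sum(prods)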
class schoen_6_2:
    """
    schoen function of dimension 6 with 50 stationary points.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529], [0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184], [0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438], [0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803], [0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795], [0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230], [0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664], [0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355], [0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489], [0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234], [0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836], [0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237], [0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819], [0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579], [0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570], [0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993], [0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904], [0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397], [0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282], [0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034], [0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025], [0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068], [0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192], [0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128], [0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800], [0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022], [0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384], [0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643], [0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532], [0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229], [0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807], [0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519], [0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478], [0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632], [0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068], [0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899], [0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294], [0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362], [0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113], [0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433], [0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837], [0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425], [0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036], [0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288], [0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200], [0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718], [0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971], [0.359629, 0.724830, 0.455053, 0.120311, 0.258563, 0.932004], [0.209891, 0.990298, 0.767661, 0.284193, 0.375076, 0.154363], [0.410402, 0.437385, 0.639614, 0.946647, 0.579466, 0.524775]]) f = np.array( [-1000, -1000, -1000, 109.6, 132.4, 558.2, 158.0, 6.2, 205.4, 593.9, 2.4, 399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3, 837.1, 283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4, 691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2, 276.0, 829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2, 420.0, 956.6, 241.0, 21.1, 169.8, 178.1, 394.4]) dimension = 6 var_lower = np.array([0 for i in range(6)]) var_upper = 
np.array([1 for i in range(6)]) optimum_point = np.array([0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529]) optimum_value = -1000 var_type = np.array(['R'] * 6)
# -- end class
class schoen_10_1:
    """
    schoen function of dimension 10 with 50 stationary points.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 10)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.914871, 0.765230, 0.139426, 0.617466, 0.823635, 0.794003, 0.801171, 0.568811, 0.279434, 0.540422], [0.976983, 0.593277, 0.701115, 0.585262, 0.669106, 0.272906, 0.177127, 0.143389, 0.561181, 0.018744], [0.385208, 0.984106, 0.390066, 0.905970, 0.169600, 0.191291, 0.564157, 0.689910, 0.857031, 0.715390], [0.975998, 0.536904, 0.819333, 0.801793, 0.564454, 0.336124, 0.654190, 0.044197, 0.717416, 0.465807], [0.750519, 0.415284, 0.258927, 0.736115, 0.597744, 0.763716, 0.747691, 0.969633, 0.188117, 0.964954], [0.412888, 0.671756, 0.380214, 0.558595, 0.768370, 0.998320, 0.212183, 0.606757, 0.531315, 0.303569], [0.196682, 0.139879, 0.108608, 0.736975, 0.755971, 0.021390, 0.852398, 0.188596, 0.920133, 0.045012], [0.956270, 0.729258, 0.397664, 0.013146, 0.519861, 0.300011, 0.008396, 0.820346, 0.176841, 0.402298], [0.126432, 0.872346, 0.923581, 0.297492, 0.992744, 0.486525, 0.915493, 0.589980, 0.498242, 0.989945], [0.697409, 0.026641, 0.875467, 0.503039, 0.563285, 0.096769, 0.933643, 0.884419, 0.585825, 0.395465], [0.494783, 0.824300, 0.153326, 0.202651, 0.579815, 0.416954, 0.707624, 0.497959, 0.568876, 0.812841], [0.126963, 0.757337, 0.648583, 0.787445, 0.822586, 0.401155, 0.301350, 0.562707, 0.744074, 0.088372], [0.293611, 0.835864, 0.925111, 0.760322, 0.729456, 0.096840, 0.651466, 0.975836, 0.691353, 0.038384], [0.999250, 0.916829, 0.205699, 0.027241, 0.156956, 0.206598, 0.175242, 0.811219, 0.660192, 0.119865], [0.387978, 0.665180, 0.774376, 0.135223, 0.766238, 0.380668, 0.058279, 0.727506, 0.991527, 0.345759], [0.299341, 0.066231, 0.680305, 0.392230, 0.319985, 0.698292, 0.100236, 0.394973, 0.096232, 0.362943], [0.281548, 0.860858, 0.647870, 0.981650, 0.110777, 0.836484, 0.697387, 0.659942, 0.694425, 0.434991], [0.606706, 0.052287, 0.858208, 0.738885, 0.158495, 0.002367, 0.933796, 0.112986, 0.647308, 0.421573], [0.776505, 0.101364, 0.610406, 0.275033, 0.548409, 0.998967, 0.536743, 0.943903, 0.960993, 0.251672], [0.371347, 0.491122, 0.772374, 0.860206, 0.752131, 0.338591, 0.826739, 0.312111, 0.768881, 0.862719], [0.866886, 0.358220, 0.131205, 0.276334, 0.334111, 0.429525, 0.752197, 0.167524, 0.437764, 0.162916], [0.584246, 0.511215, 0.659647, 0.349220, 0.954428, 0.477982, 0.386041, 0.813944, 0.753530, 0.983276], [0.697327, 0.499835, 0.530487, 0.599958, 0.497257, 0.998852, 0.106262, 0.186978, 0.887481, 0.749174], [0.041611, 0.278918, 0.999095, 0.825221, 0.218320, 0.383711, 0.077041, 0.642061, 0.668906, 0.758298], [0.072437, 0.592862, 0.040655, 0.446330, 0.651659, 0.055738, 0.631924, 0.890039, 0.192989, 0.741054], [0.533886, 0.135079, 0.787647, 0.593408, 0.749228, 0.749045, 0.190386, 0.755508, 0.465321, 0.465156], [0.748843, 0.696419, 0.882124, 0.843895, 0.858057, 0.220107, 0.350310, 0.102947, 0.453576, 0.875940], [0.560231, 0.580247, 0.381834, 0.807535, 0.184636, 0.615702, 0.628408, 0.081783, 0.793384, 0.233639], [0.384827, 0.589138, 0.630013, 0.634506, 0.630712, 0.521293, 0.494486, 0.681700, 0.288512, 0.319808], [0.721978, 0.452289, 0.426726, 0.323106, 0.781584, 0.999325, 0.043670, 0.884560, 0.520936, 0.430684], [0.810388, 0.624041, 0.811624, 0.105973, 0.199807, 0.440644, 0.864152, 0.282280, 0.397116, 0.499932], [0.973889, 0.677797, 0.080137, 0.549098, 0.625445, 0.577342, 0.538642, 0.388039, 0.552273, 0.793807], [0.365176, 0.228017, 0.623500, 0.084450, 0.177343, 0.910108, 0.632719, 0.521458, 0.894843, 0.707893], [0.502069, 0.622312, 0.958019, 0.744999, 0.515695, 0.407885, 0.590739, 0.736542, 0.297555, 0.237955], [0.313835, 0.090014, 0.336274, 0.433171, 0.330864, 0.105751, 0.160367, 
0.651934, 0.207260, 0.293577], [0.886072, 0.592935, 0.498116, 0.321835, 0.011216, 0.543911, 0.506579, 0.216779, 0.406812, 0.261349], [0.789947, 0.881332, 0.696597, 0.742955, 0.252224, 0.718157, 0.188217, 0.371208, 0.178640, 0.347720], [0.482759, 0.663618, 0.622706, 0.036170, 0.278854, 0.088147, 0.482808, 0.134824, 0.028828, 0.944537], [0.184705, 0.662346, 0.917194, 0.186490, 0.918392, 0.955111, 0.636015, 0.447595, 0.813716, 0.372839], [0.231741, 0.637199, 0.745257, 0.201568, 0.697485, 0.897022, 0.239791, 0.495219, 0.153831, 0.387172], [0.198061, 0.194102, 0.550259, 0.751804, 0.503973, 0.034252, 0.788267, 0.731760, 0.118338, 0.057247], [0.068470, 0.545180, 0.668845, 0.714932, 0.688014, 0.203845, 0.146138, 0.109039, 0.470214, 0.441797], [0.085180, 0.142394, 0.938665, 0.071422, 0.946796, 0.697832, 0.472400, 0.161384, 0.325715, 0.122550], [0.637672, 0.986961, 0.969438, 0.989508, 0.381318, 0.800871, 0.012035, 0.326007, 0.459124, 0.645374], [0.147210, 0.954608, 0.361146, 0.094699, 0.092327, 0.301664, 0.478447, 0.008274, 0.680576, 0.004184], [0.768792, 0.812618, 0.915766, 0.029070, 0.506944, 0.457816, 0.839167, 0.024706, 0.990756, 0.088779], [0.872678, 0.601536, 0.948347, 0.621023, 0.415621, 0.289340, 0.291338, 0.190461, 0.664007, 0.583513], [0.641216, 0.700152, 0.080576, 0.355500, 0.294700, 0.338614, 0.563964, 0.528079, 0.759223, 0.508432], [0.738489, 0.077376, 0.429485, 0.300586, 0.576927, 0.185931, 0.231659, 0.954833, 0.614178, 0.092903], [0.729321, 0.318607, 0.768657, 0.899419, 0.749499, 0.623403, 0.671793, 0.052835, 0.973726, 0.168336]]) f = np.array( [-1000, -1000, -1000, 799.1, 396.8, 370.3, 400.2, 239.7, 678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9, 579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8, 43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8, 555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3, 111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2]) dimension = 10 var_lower = np.array([0 for i in range(10)]) var_upper = np.array([1 for i in range(10)]) optimum_point = np.array([0.914871, 0.765230, 0.139426, 0.617466, 0.823635, 0.794003, 0.801171, 0.568811, 0.279434, 0.540422]) optimum_value = -1000 var_type = np.array(['R'] * 10)
# -- end class
class schoen_10_2:
    """
    schoen function of dimension 10 with 50 stationary points.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 10)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.131461, 0.965235, 0.046134, 0.983011, 0.719813, 0.827542, 0.662422, 0.570546, 0.578707, 0.013264], [0.068454, 0.682785, 0.582736, 0.434517, 0.310613, 0.869876, 0.993949, 0.629156, 0.590599, 0.356378], [0.632837, 0.961665, 0.015079, 0.378878, 0.805608, 0.685239, 0.528658, 0.752934, 0.717790, 0.374865], [0.286191, 0.912944, 0.400358, 0.902532, 0.324887, 0.850063, 0.483503, 0.764147, 0.147726, 0.159851], [0.303483, 0.754790, 0.090527, 0.653764, 0.164323, 0.402931, 0.593477, 0.448444, 0.711483, 0.113869], [0.057398, 0.302029, 0.596351, 0.565466, 0.694204, 0.974864, 0.323989, 0.298493, 0.859391, 0.238714], [0.139267, 0.214902, 0.608462, 0.297987, 0.499810, 0.578553, 0.548077, 0.208442, 0.046162, 0.246848], [0.680420, 0.783181, 0.828103, 0.475810, 0.680401, 0.188455, 0.015200, 0.650103, 0.762389, 0.063985], [0.409243, 0.600740, 0.302354, 0.588411, 0.436291, 0.294790, 0.701477, 0.994162, 0.433749, 0.535320], [0.077949, 0.530126, 0.869737, 0.387811, 0.705317, 0.632911, 0.442087, 0.082918, 0.441383, 0.591975], [0.622628, 0.054964, 0.020475, 0.145616, 0.163873, 0.321546, 0.282867, 0.743494, 0.750568, 0.732386], [0.538574, 0.066932, 0.225204, 0.290045, 0.613242, 0.529365, 0.384018, 0.946557, 0.974384, 0.425297], [0.108817, 0.850094, 0.886417, 0.161581, 0.082973, 0.506354, 0.589650, 0.638991, 0.045151, 0.688464], [0.917742, 0.365119, 0.484176, 0.173231, 0.210253, 0.303688, 0.992141, 0.023109, 0.977178, 0.535146], [0.183469, 0.198085, 0.511596, 0.275610, 0.753700, 0.437328, 0.986237, 0.028654, 0.767921, 0.997910], [0.484908, 0.759122, 0.577318, 0.359934, 0.935730, 0.617833, 0.770173, 0.311175, 0.004831, 0.157457], [0.634077, 0.236972, 0.016427, 0.261753, 0.349712, 0.245870, 0.412238, 0.523557, 0.985327, 0.094060], [0.477875, 0.803438, 0.496728, 0.848920, 0.497386, 0.938203, 0.279797, 0.287076, 0.395184, 0.980546], [0.450215, 0.193712, 0.975838, 0.103925, 0.077410, 0.709573, 0.253072, 0.311723, 0.885664, 0.204528], [0.557312, 0.815198, 0.097914, 0.539142, 0.826048, 0.130070, 0.049858, 0.223634, 0.076387, 0.831224], [0.927559, 0.324916, 0.563393, 0.209281, 0.344394, 0.953384, 0.298679, 0.890637, 0.966615, 0.380006], [0.026403, 0.997573, 0.479163, 0.379686, 0.687928, 0.832002, 0.214326, 0.348248, 0.073151, 0.062646], [0.726869, 0.911171, 0.961920, 0.874884, 0.216867, 0.076966, 0.776240, 0.495777, 0.963492, 0.425246], [0.357483, 0.486330, 0.759177, 0.748362, 0.889904, 0.350438, 0.232983, 0.823613, 0.792656, 0.441264], [0.875826, 0.359459, 0.214808, 0.425850, 0.493328, 0.456048, 0.523145, 0.504154, 0.090128, 0.472437], [0.813400, 0.808407, 0.427211, 0.902524, 0.210376, 0.490662, 0.915939, 0.169439, 0.078865, 0.485371], [0.877334, 0.982207, 0.679085, 0.486335, 0.940715, 0.585964, 0.289279, 0.694886, 0.172625, 0.201457], [0.141599, 0.476124, 0.762246, 0.067045, 0.411332, 0.813196, 0.134138, 0.302390, 0.856145, 0.349243], [0.346912, 0.082142, 0.787442, 0.857465, 0.371129, 0.448550, 0.967943, 0.775340, 0.943681, 0.656127], [0.619267, 0.547196, 0.470422, 0.141566, 0.584198, 0.952226, 0.196462, 0.629549, 0.685469, 0.824365], [0.014209, 0.789812, 0.836373, 0.186139, 0.493840, 0.710697, 0.910033, 0.368287, 0.865953, 0.140892], [0.482763, 0.072574, 0.026730, 0.143687, 0.739505, 0.419649, 0.013683, 0.662644, 0.785254, 0.234561], [0.821421, 0.844100, 0.153937, 0.671762, 0.290469, 0.631347, 0.591435, 0.498966, 0.043395, 0.176771], [0.404994, 0.496656, 0.951774, 0.497357, 0.715401, 0.023378, 0.493045, 0.342766, 0.117055, 0.698590], [0.985857, 0.831692, 0.423498, 0.215757, 0.341260, 0.790760, 0.941186, 
0.716883, 0.062641, 0.582012], [0.676905, 0.280897, 0.800638, 0.898913, 0.735995, 0.592412, 0.433021, 0.432772, 0.874477, 0.112375], [0.377382, 0.118941, 0.529204, 0.419434, 0.673891, 0.074904, 0.129868, 0.819585, 0.220536, 0.353223], [0.233415, 0.136703, 0.487256, 0.777498, 0.901915, 0.612402, 0.778635, 0.436718, 0.484520, 0.641969], [0.273297, 0.670196, 0.344525, 0.669751, 0.180230, 0.530085, 0.393284, 0.326043, 0.260840, 0.364690], [0.931213, 0.676123, 0.912481, 0.898258, 0.001887, 0.408306, 0.917215, 0.496959, 0.287951, 0.562511], [0.047196, 0.780338, 0.895994, 0.088169, 0.552425, 0.130790, 0.308504, 0.232476, 0.187952, 0.105936], [0.343517, 0.356222, 0.416018, 0.450278, 0.487765, 0.040510, 0.592363, 0.771635, 0.577849, 0.315843], [0.527759, 0.529503, 0.210423, 0.756794, 0.892670, 0.339374, 0.445837, 0.363265, 0.432114, 0.942045], [0.560107, 0.110906, 0.115725, 0.761393, 0.969105, 0.921166, 0.455014, 0.593512, 0.111887, 0.217300], [0.463382, 0.635591, 0.329484, 0.573602, 0.492558, 0.474174, 0.371906, 0.850465, 0.467637, 0.261373], [0.033051, 0.422543, 0.294155, 0.699026, 0.846231, 0.047967, 0.686826, 0.480273, 0.463181, 0.345601], [0.285473, 0.723925, 0.202386, 0.671909, 0.685277, 0.993969, 0.415329, 0.155218, 0.233826, 0.088752], [0.029705, 0.651519, 0.813239, 0.677718, 0.961189, 0.285385, 0.824635, 0.837670, 0.524970, 0.815489], [0.519627, 0.508274, 0.141067, 0.156163, 0.274566, 0.536322, 0.834749, 0.852042, 0.656166, 0.964211], [0.119675, 0.971352, 0.052983, 0.178217, 0.408438, 0.215091, 0.102098, 0.256312, 0.051758, 0.906712]]) f = np.array( [-1000, -1000, -1000, 90.4, 830.9, 52.7, 375.2, 289.7, 244.1, 470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9, 11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3, 729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2, 568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4, 437.2, 199.7, 415.4, 966.0, 362.3]) dimension = 10 var_lower = np.array([0 for i in range(10)]) var_upper = np.array([1 for i in range(10)]) optimum_point = np.array([0.131461, 0.965235, 0.046134, 0.983011, 0.719813, 0.827542, 0.662422, 0.570546, 0.578707, 0.013264]) optimum_value = -1000 var_type = np.array(['R'] * 10)
# -- end class
class schaeffer_f7_12_1:
    """
    Schaeffer F7 function.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 12)
        value = 0
        normalizer = 1.0/float(len(x) - 1)
        for i in range(len(x) - 1):
            si = 2**i * np.sqrt((x[i] - cls.optimum_point[i])**2 +
                                (x[i+1] - cls.optimum_point[i+1])**2)
            value += (normalizer * np.sqrt(si) * (np.sin(50*si**0.20) + 1))**2
        return value - 10

    dimension = 12
    var_lower = np.array([-50 for i in range(12)])
    var_upper = np.array([50 for i in range(12)])
    optimum_point = np.array([-34.32567, -34.98896, 7.69262, 30.3388,
                              -48.24371, 23.18355, 24.93374, 32.07436,
                              46.86153, 4.64872, 25.64591, -16.69128])
    optimum_value = -10
    var_type = np.array(['R'] * 12)
# -- end class
class schaeffer_f7_12_2:
    """
    Schaeffer F7 function.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 12)
        value = 0
        normalizer = 1.0/float(len(x) - 1)
        for i in range(len(x) - 1):
            si = 3**i * np.sqrt((x[i] - cls.optimum_point[i])**2 +
                                (x[i+1] - cls.optimum_point[i+1])**2)
            value += (normalizer * np.sqrt(si) * (np.sin(50*si**0.20) + 1))**2
        return value + 10

    dimension = 12
    var_lower = np.array([-50 for i in range(12)])
    var_upper = np.array([50 for i in range(12)])
    optimum_point = np.array([-8.214, 30.69133, 48.26095, -4.94219,
                              15.15357, 0.4841, -13.54025, -40.78766,
                              -16.02916, 16.42138, 39.30248, -49.56986])
    optimum_value = 10
    var_type = np.array(['R'] * 12)
# -- end class

# After this point, all functions are MINLP
class gear:
    """
    gear function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 4)
        value = (0.14427932477276 - x[0]*x[1]/(x[2]*x[3]))**2
        return value

    dimension = 4
    var_lower = np.array([12, 12, 12, 12])
    var_upper = np.array([60, 60, 60, 60])
    optimum_point = np.array([12.0, 23.0, 58.0, 33.0])
    optimum_value = 0.0
    var_type = np.array(['I'] * 4)
# -- end class
class gear4:
    """
    gear4 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 5)
        value = -1000000*x[0]*x[1]/(x[2]*x[3]) + 2*x[4] + 144279.32477276
        # There is a constraint:
        # -1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276 >= 0
        penalty = 10*max(0, -(-1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] +
                              144279.32477276))
        return value + penalty

    dimension = 5
    var_lower = np.array([12, 12, 12, 12, 0])
    var_upper = np.array([60, 60, 60, 60, 100])
    optimum_point = np.array([19.0, 16.0, 43.0, 49.0, 1.64342847396619])
    optimum_value = 1.6434284739
    var_type = np.array(['I'] * 4 + ['R'])
# -- end class
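
# Illustrative sketch (not part of the original module): the MINLP test
# functions below handle constraints of the form g(x) >= 0 by adding a
# penalty term weight * max(0, -g(x)) to the objective, so violated
# constraints are discouraged rather than enforced.  The hypothetical helper
# below shows that pattern in isolation.
def _penalized_value(objective, constraints, x, weight=10.0):
    """Objective plus weight * max(0, -g(x)) for each constraint g(x) >= 0."""
    value = objective(x)
    for g in constraints:
        value += weight * max(0.0, -g(x))
    return value

# Example (hypothetical): a prob03-style objective 3*x[0] + 2*x[1] with the
# constraint x[0]*x[1] - 3.5 >= 0 would be evaluated as
# _penalized_value(lambda x: 3*x[0] + 2*x[1],
#                  [lambda x: x[0]*x[1] - 3.5], x)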
class nvs02:
    """
    nvs02 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 5)
        value = (0.0001*(5.3578547*np.sqrt(x[2]) + 0.8356891*x[0]*x[4] +
                         37.293239*x[0]) + 5.9207859)
        # There are three constraints:
        # 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
        #       0.0022053*x[2]*x[4] + 85.334407) <= 92
        # 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
        #        0.0021813*math.sqrt(x[2]) + 80.51249) <= 110
        # 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
        #        0.0019085*x[2]*x[3] + 9.300961) <= 25
        penalty = 0.0
        penalty += 10*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
                               0.0022053*x[2]*x[4] + 85.334407))
        penalty += 10*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
                              0.0022053*x[2]*x[4] + 85.334407) - 92)
        penalty += 10*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
                               0.0021813*np.sqrt(x[2]) + 80.51249) + 90)
        penalty += 10*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
                              0.0021813*np.sqrt(x[2]) + 80.51249) - 110)
        penalty += 10*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
                               0.0019085*x[2]*x[3] + 9.300961) + 20)
        penalty += 10*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
                              0.0019085*x[2]*x[3] + 9.300961) - 25)
        return value + penalty

    dimension = 5
    var_lower = np.array([0, 0, 0, 0, 0])
    var_upper = np.array([200, 200, 200, 200, 200])
    optimum_point = np.array([0.0, 9.0, 9.0, 200.0, 197.0])
    optimum_value = 5.9223932564100004
    var_type = np.array(['I'] * 5)
# -- end class
class nvs03:
    """
    nvs03 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = (-8 + x[0])**2 + (-2 + x[1])**2
        # There are two constraints:
        # -0.1*x[0]**2 + x[1] >= 0
        # -0.333333333333333*x[0] - x[1] + 4.5 >= 0.0
        penalty = 0.0
        penalty += 100*max(0, -(-0.1*x[0]**2 + x[1]))
        penalty += 100*max(0, -(-0.333333333333333*x[0] - x[1] + 4.5))
        return value + penalty

    dimension = 2
    var_lower = np.array([0, 0])
    var_upper = np.array([200, 200])
    optimum_point = np.array([4.0, 2.0])
    optimum_value = 16.0
    var_type = np.array(['I'] * 2)
# -- end class
class nvs04:
    """
    nvs04 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = 100*(0.5 + x[1] - (0.6 + x[0])**2)**2 + (0.4 - x[0])**2
        return value

    dimension = 2
    var_lower = np.array([0, 0])
    var_upper = np.array([200, 200])
    optimum_point = np.array([1.0, 2.0])
    optimum_value = 0.72
    var_type = np.array(['I'] * 2)
# -- end class
class nvs06:
    """
    nvs06 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = (0.1*((x[0])**2 + (1 + (x[1])**2)/(x[0])**2 +
                      (100 + ((x[0])**2)*(x[1])**2)/(x[0]*x[1])**4) + 1.2)
        return value

    dimension = 2
    var_lower = np.array([1, 1])
    var_upper = np.array([200, 200])
    optimum_point = np.array([2.0, 2.0])
    optimum_value = 1.7703125
    var_type = np.array(['I'] * 2)
# -- end class
class nvs07:
    """
    nvs07 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 3)
        value = 2*x[1]**2 + x[0] + 5*x[2]
        # There are two constraints:
        # x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10 >= 0
        # x[0] - x[2] - 2.66 >= 0
        penalty = 0.0
        penalty += 10*max(0, -(x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10))
        penalty += 10*max(0, -(x[0] - x[2] - 2.66))
        return value + penalty

    dimension = 3
    var_lower = np.array([0, 0, 0])
    var_upper = np.array([200, 200, 200])
    optimum_point = np.array([4.0, 0.0, 0.0])
    optimum_value = 4.0
    var_type = np.array(['I'] * 3)
# -- end class
class nvs09:
    """
    nvs09 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 10)
        value = ((np.log(x[0] - 2))**2 + (np.log(10 - x[0]))**2 +
                 (np.log(x[1] - 2))**2 + (np.log(10 - x[1]))**2 +
                 (np.log(x[2] - 2))**2 + (np.log(10 - x[2]))**2 +
                 (np.log(x[3] - 2))**2 + (np.log(10 - x[3]))**2 +
                 (np.log(x[4] - 2))**2 + (np.log(10 - x[4]))**2 +
                 (np.log(x[5] - 2))**2 + (np.log(10 - x[5]))**2 +
                 (np.log(x[6] - 2))**2 + (np.log(10 - x[6]))**2 +
                 (np.log(x[7] - 2))**2 + (np.log(10 - x[7]))**2 +
                 (np.log(x[8] - 2))**2 + (np.log(10 - x[8]))**2 +
                 (np.log(x[9] - 2))**2 + (np.log(10 - x[9]))**2 -
                 (x[0]*x[1]*x[2]*x[3]*x[4]*x[5]*x[6]*x[7]*x[8]*x[9])**0.2)
        return value

    dimension = 10
    var_lower = np.array([3 for i in range(10)])
    var_upper = np.array([9 for i in range(10)])
    optimum_point = np.array([9, 9, 9, 9, 9, 9, 9, 9, 9, 9])
    optimum_value = -43.134336918035
    var_type = np.array(['I'] * 10)
# -- end class
class nvs14:
    """
    nvs14 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 5)
        value = (5.3578547*x[2]**2 + 0.8356891*x[0]*x[4] +
                 37.293239*x[0] - 40792.141)
        # There are three constraints:
        # 0 <= (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
        #       0.0022053*x[2]*x[4] + 85.334407) <= 92
        # 90 <= (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
        #        0.0021813*x[2]**2 + 80.51249) <= 110
        # 20 <= (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
        #        0.0019085*x[2]*x[3] + 9.300961) <= 25
        penalty = 0.0
        penalty += 1000*max(0, -(0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
                                 0.0022053*x[2]*x[4] + 85.334407))
        penalty += 1000*max(0, (0.0056858*x[1]*x[4] + 0.0006262*x[0]*x[3] -
                                0.0022053*x[2]*x[4] + 85.334407) - 92)
        penalty += 1000*max(0, -(0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
                                 0.0021813*x[2]**2 + 80.51249) + 90)
        penalty += 1000*max(0, (0.0071317*x[1]*x[4] + 0.0029955*x[0]*x[1] +
                                0.0021813*x[2]**2 + 80.51249) - 110)
        penalty += 1000*max(0, -(0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
                                 0.0019085*x[2]*x[3] + 9.300961) + 20)
        penalty += 1000*max(0, (0.0047026*x[2]*x[4] + 0.0012547*x[0]*x[2] +
                                0.0019085*x[2]*x[3] + 9.300961) - 25)
        return value + penalty

    dimension = 5
    var_lower = np.array([0, 0, 0, 0, 0])
    var_upper = np.array([200, 200, 200, 200, 200])
    optimum_point = np.array([0.0, 7.0, 9.0, 175.0, 200.0])
    optimum_value = -40358.1547693
    var_type = np.array(['I'] * 5)
# -- end class
class nvs15:
    """
    nvs15 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 3)
        value = (2*x[0]**2 - 8*x[0] + 2*x[1]**2 - 6*x[1] + x[2]**2 - 4*x[2] +
                 2*x[0]*x[1] + 2*x[0]*x[2] + 9)
        # There is one constraint:
        # -x[0] - x[1] - 2*x[2] + 3 >= 0
        penalty = 0.0
        penalty += 10*max(0, -(-x[0] - x[1] - 2*x[2] + 3))
        return value + penalty

    dimension = 3
    var_lower = np.array([0, 0, 0])
    var_upper = np.array([200, 200, 200])
    optimum_point = np.array([2.0, 0.0, 0.0])
    optimum_value = 1.0
    var_type = np.array(['I'] * 3)
# -- end class
class nvs16:
    """
    nvs16 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = ((1.5 - x[0]*(1 - x[1]))**2 +
                 (2.25 - x[0]*(1 - x[1]**2))**2 +
                 (2.625 - x[0]*(1 - x[1]**3))**2)
        return value

    dimension = 2
    var_lower = np.array([0, 0])
    var_upper = np.array([200, 200])
    optimum_point = np.array([2.0, 0.0])
    optimum_value = 0.703125
    var_type = np.array(['I'] * 2)
# -- end class
class prob03:
    """
    prob03 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = 3*x[0] + 2*x[1]
        # There is one constraint:
        # x[0]*x[1] - 3.5 >= 0
        penalty = 10*max(0, -(x[0]*x[1] - 3.5))
        return value + penalty

    dimension = 2
    var_lower = np.array([1, 1])
    var_upper = np.array([5, 5])
    optimum_point = np.array([2.0, 2.0])
    optimum_value = 10.0
    var_type = np.array(['I'] * 2)
# -- end class
class sporttournament06:
    """
    sporttournament06 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 15)
        value = (2*x[0]*x[2] - 2*x[0] + 2*x[2] + 2*x[0]*x[6] - 2*x[6] +
                 2*x[1]*x[4] - 2*x[1] - 2*x[4] + 2*x[1]*x[9] - 4*x[9] -
                 2*x[2]*x[3] + 2*x[3] - 2*x[2]*x[11] - 2*x[2]*x[13] -
                 2*x[3]*x[4] + 2*x[3]*x[8] - 2*x[8] - 2*x[3]*x[14] +
                 2*x[4]*x[5] - 2*x[5] + 2*x[4]*x[7] - 2*x[7] + 2*x[5]*x[8] -
                 2*x[6]*x[7] + 2*x[6]*x[11] + 2*x[6]*x[12] + 2*x[7]*x[9] +
                 2*x[7]*x[14] + 2*x[8]*x[10] - 2*x[10] - 2*x[8]*x[11] +
                 2*x[9]*x[10] + 2*x[9]*x[11] - 2*x[12]*x[14] + 2*x[13]*x[14])
        return value

    dimension = 15
    var_lower = np.array([0] * 15)
    var_upper = np.array([1] * 15)
    optimum_point = np.array([0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0,
                              0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0])
    optimum_value = -12.0
    var_type = np.array(['I'] * 15)
# -- end class
class st_miqp1:
    """
    st_miqp1 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 5)
        value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] + 44*x[1] +
                 50*x[2]*x[2] + 45*x[2] + 50*x[3]*x[3] + 47*x[3] +
                 50*x[4]*x[4] + 47.5*x[4])
        # There is one constraint:
        # 20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] + 4*x[4] - 40 >= 0
        penalty = 100*max(0, -(20*x[0] + 12*x[1] + 11*x[2] + 7*x[3] +
                               4*x[4] - 40))
        return value + penalty

    dimension = 5
    var_lower = np.array([0, 0, 0, 0, 0])
    var_upper = np.array([1, 1, 1, 1, 1])
    optimum_point = np.array([1.0, 1.0, 1.0, 0.0, 0.0])
    optimum_value = 281.0
    var_type = np.array(['I'] * 5)
# -- end class
class st_miqp3:
    """
    st_miqp3 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 2)
        value = 6*x[0]*x[0] - 3*x[1]
        # There is one constraint:
        # 4*x[0] - x[1] >= 0
        penalty = 10*max(0, -(4*x[0] - x[1]))
        return value + penalty

    dimension = 2
    var_lower = np.array([0, 0])
    var_upper = np.array([3, 50])
    optimum_point = np.array([1.0, 4.0])
    optimum_value = -6.0
    var_type = np.array(['I'] * 2)
# -- end class
class st_test1:
    """
    st_test1 function of the MINLPLib test set.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 5)
        value = (50*x[0]*x[0] + 42*x[0] + 50*x[1]*x[1] - 44*x[1] +
                 50*x[3]*x[3] - 47*x[3] + 50*x[4]*x[4] - 47.5*x[4] + 45*x[2])
        # There is one constraint:
        # -20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] - 4*x[4] + 40 >= 0
        penalty = 10*max(0, -(-20*x[0] - 12*x[1] - 11*x[2] - 7*x[3] -
                              4*x[4] + 40))
        return value + penalty

    dimension = 5
    var_lower = np.array([0, 0, 0, 0, 0])
    var_upper = np.array([1, 1, 1, 1, 1])
    optimum_point = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
    optimum_value = 0.0
    var_type = np.array(['I'] * 5)
# -- end class
class schoen_6_1_int:
    """
    schoen function of dimension 6 with 50 stationary points.
    Mixed integer version.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x/10 - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.298854, 0.181010, 0.984817, 0.125272, 0.548396, 0.894658], [0.800371, 0.817380, 0.398577, 0.652349, 0.250843, 0.130235], [0.268631, 0.929778, 0.640422, 0.462004, 0.492930, 0.434955], [0.257863, 0.729198, 0.210810, 0.364378, 0.228216, 0.947432], [0.767627, 0.592150, 0.103788, 0.696895, 0.472449, 0.244504], [0.369630, 0.110889, 0.072344, 0.515753, 0.068087, 0.103057], [0.425457, 0.807081, 0.491209, 0.449497, 0.065690, 0.592775], [0.544229, 0.619841, 0.704609, 0.573098, 0.044844, 0.305800], [0.164031, 0.722884, 0.670496, 0.517915, 0.176386, 0.921565], [0.153788, 0.703577, 0.899129, 0.406134, 0.941356, 0.538215], [0.984781, 0.510479, 0.573361, 0.884599, 0.399472, 0.712935], [0.488416, 0.403997, 0.888823, 0.048434, 0.265197, 0.478025], [0.047985, 0.280071, 0.709960, 0.278919, 0.035737, 0.037699], [0.656172, 0.498412, 0.458622, 0.982970, 0.041234, 0.921127], [0.590802, 0.359690, 0.396516, 0.338153, 0.320793, 0.847369], [0.649160, 0.846974, 0.451818, 0.064864, 0.818545, 0.955844], [0.583716, 0.669610, 0.463098, 0.492710, 0.989690, 0.002397], [0.097300, 0.112389, 0.128759, 0.182995, 0.262808, 0.701887], [0.487363, 0.892520, 0.269056, 0.116046, 0.905416, 0.808013], [0.908316, 0.023997, 0.670399, 0.985859, 0.178548, 0.450410], [0.230409, 0.381732, 0.613667, 0.697260, 0.016950, 0.736507], [0.132544, 0.526349, 0.650042, 0.084086, 0.979257, 0.771499], [0.872978, 0.008826, 0.587481, 0.624637, 0.623175, 0.939539], [0.447828, 0.836386, 0.223285, 0.422756, 0.344488, 0.555953], [0.546839, 0.153934, 0.953017, 0.640891, 0.666774, 0.647583], [0.762237, 0.608920, 0.401447, 0.056202, 0.203535, 0.890609], [0.655150, 0.444544, 0.495582, 0.247926, 0.155128, 0.188004], [0.481813, 0.387178, 0.597276, 0.634671, 0.285404, 0.714793], [0.976385, 0.018854, 0.262585, 0.640434, 0.086314, 0.669879], [0.120164, 0.882300, 0.057626, 0.695111, 0.735135, 0.004711], [0.414644, 0.715618, 0.642033, 0.770645, 0.407019, 0.502945], [0.257475, 0.620029, 0.840603, 0.638546, 0.636521, 0.883558], [0.788980, 0.374926, 0.448016, 0.081941, 0.225763, 0.944905], [0.661591, 0.178832, 0.790349, 0.141653, 0.424235, 0.571960], [0.546361, 0.624907, 0.190470, 0.412713, 0.124748, 0.662788], [0.226384, 0.065829, 0.960836, 0.767766, 0.089695, 0.441792], [0.303675, 0.370047, 0.973692, 0.830432, 0.424719, 0.173571], [0.548375, 0.823234, 0.334253, 0.078398, 0.097269, 0.195120], [0.646225, 0.100478, 0.723833, 0.891035, 0.386094, 0.360272], [0.362757, 0.114700, 0.731020, 0.783785, 0.250399, 0.244399], [0.904335, 0.869074, 0.479004, 0.525872, 0.359411, 0.338333], [0.563175, 0.245903, 0.694417, 0.833524, 0.205055, 0.132535], [0.401356, 0.920963, 0.401902, 0.120625, 0.765834, 0.381552], [0.769562, 0.279591, 0.567598, 0.017192, 0.697366, 0.813451], [0.738572, 0.984740, 0.007616, 0.005382, 0.592976, 0.771773], [0.683721, 0.824097, 0.731623, 0.936945, 0.182420, 0.393537], [0.375859, 0.541929, 0.974640, 0.377459, 0.754060, 0.019335], [0.4, 0.6, 0.1, 0.4, 0.637412, 0.204038], [0.5, 0.4, 0.4, 0.0, 0.198525, 0.074668], [0.7, 0.1, 0.3, 0.5, 0.143614, 0.961610]]) f = np.array( [672.2, 861.4, 520.9, 121.0, 11.5, 48.2, 702.4, 536.2, 457.7, 801.3, 787.7, 768.6, 292.4, 960.0, 573.1, 303.7, 283.3, 474.1, 216.9, 462.2, 853.6, 677.1, 464.6, 830.6, 831.8, 109.6, 967.6, 122.9, 896.2, 490.2, 710.4, 81.1, 802.9, 999.8, 945.5, 672.3, 712.9, 235.8, 266.5, 772.4, 326.6, 585.5, 16.9, 135.9, 224.2, 382.1, 614.6, -1000, -1000, -1000]) dimension = 6 var_lower = np.array([0 for i in range(6)]) var_upper = np.array([10 for i in range(6)]) optimum_point = np.array([04., 
06., 01., 04., 06.37412, 02.04038]) optimum_value = -1000 var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
class schoen_6_2_int:
    """
    schoen function of dimension 6 with 50 stationary points.
    Mixed integer version.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 6)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x/10 - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.669711, 0.815540, 0.646120, 0.377447, 0.111538, 0.040529], [0.000632, 0.706804, 0.857031, 0.473778, 0.993569, 0.616184], [0.625617, 0.880221, 0.534547, 0.760235, 0.276998, 0.735438], [0.774577, 0.922914, 0.947791, 0.315328, 0.414841, 0.785803], [0.079768, 0.131498, 0.225123, 0.464621, 0.638041, 0.992795], [0.471038, 0.244503, 0.565776, 0.898397, 0.604639, 0.306230], [0.642233, 0.482219, 0.034943, 0.934805, 0.972714, 0.153664], [0.550151, 0.310507, 0.042126, 0.230722, 0.444375, 0.117355], [0.789984, 0.488482, 0.065237, 0.842940, 0.793454, 0.799489], [0.850183, 0.754551, 0.516033, 0.166362, 0.201966, 0.044234], [0.000601, 0.896758, 0.304433, 0.149125, 0.178398, 0.871836], [0.056787, 0.932745, 0.218009, 0.778061, 0.131847, 0.356237], [0.210266, 0.221479, 0.014831, 0.200901, 0.656693, 0.891819], [0.528515, 0.178025, 0.188138, 0.411485, 0.217833, 0.907579], [0.195801, 0.663099, 0.477312, 0.395250, 0.655791, 0.820570], [0.933208, 0.789323, 0.350520, 0.855434, 0.491082, 0.874993], [0.251047, 0.543513, 0.529644, 0.218495, 0.351637, 0.608904], [0.963286, 0.793004, 0.650148, 0.881362, 0.904832, 0.005397], [0.431744, 0.438965, 0.044544, 0.834968, 0.330614, 0.451282], [0.234845, 0.328576, 0.388284, 0.339183, 0.206086, 0.600034], [0.512783, 0.961787, 0.959109, 0.632098, 0.910614, 0.912025], [0.454168, 0.743189, 0.834284, 0.955817, 0.072172, 0.523068], [0.696968, 0.720236, 0.341060, 0.054580, 0.045599, 0.549192], [0.272955, 0.318845, 0.700767, 0.426325, 0.895755, 0.843128], [0.992189, 0.332899, 0.272784, 0.019284, 0.073711, 0.434800], [0.154276, 0.639611, 0.924641, 0.587242, 0.358453, 0.548022], [0.021506, 0.450392, 0.515150, 0.032232, 0.650223, 0.849384], [0.316499, 0.513234, 0.958219, 0.843587, 0.125408, 0.836643], [0.538587, 0.261750, 0.732136, 0.030271, 0.893345, 0.270532], [0.987469, 0.708780, 0.446487, 0.968784, 0.734448, 0.788229], [0.353358, 0.135036, 0.249018, 0.565029, 0.740519, 0.250807], [0.810372, 0.656510, 0.472093, 0.225741, 0.420513, 0.202519], [0.848128, 0.551586, 0.513140, 0.956164, 0.483389, 0.404478], [0.292239, 0.297077, 0.934202, 0.468329, 0.872274, 0.992632], [0.828869, 0.534749, 0.716451, 0.405855, 0.164485, 0.531068], [0.130616, 0.757677, 0.284500, 0.438300, 0.957643, 0.725899], [0.503542, 0.640368, 0.381914, 0.847206, 0.134660, 0.762294], [0.653851, 0.646544, 0.436036, 0.944225, 0.310369, 0.392362], [0.539397, 0.027168, 0.697972, 0.209293, 0.992890, 0.008113], [0.902045, 0.171034, 0.194924, 0.620057, 0.002203, 0.557433], [0.802612, 0.085835, 0.380626, 0.492568, 0.238166, 0.961837], [0.466993, 0.647847, 0.113397, 0.015357, 0.928904, 0.166425], [0.892021, 0.869756, 0.681364, 0.129555, 0.394682, 0.745036], [0.060675, 0.869904, 0.757236, 0.220765, 0.615988, 0.754288], [0.031815, 0.340961, 0.455958, 0.529616, 0.840036, 0.365200], [0.834595, 0.603639, 0.745330, 0.085080, 0.184636, 0.238718], [0.575681, 0.250761, 0.874497, 0.870401, 0.854591, 0.968971], [0.3, 0.7, 0.4, 0.1, 0.258563, 0.932004], [0.2, 0.9, 0.7, 0.2, 0.375076, 0.154363], [0.4, 0.4, 0.6, 0.9, 0.579466, 0.524775]]) f = np.array( [109.6, 132.4, 558.2, 158.0, 6.2, 205.4, 593.9, 2.4, 399.8, 395.9, 212.6, 976.1, 104.4, 552.1, 436.3, 837.1, 283.7, 779.7, 392.1, 85.8, 885.1, 401.5, 367.5, 694.4, 691.6, 933.1, 590.7, 246.2, 370.0, 54.3, 719.4, 95.2, 276.0, 829.1, 613.6, 242.8, 424.6, 320.6, 666.1, 479.2, 420.0, 956.6, 241.0, 21.1, 169.8, 178.1, 394.4, -1000, -1000, -1000]) dimension = 6 var_lower = np.array([0 for i in range(6)]) var_upper = np.array([10 for i in range(6)]) optimum_point = np.array([03., 07., 
04., 01., 02.58563, 09.32004]) optimum_value = -1000 var_type = np.array(['I'] * 4 + ['R'] * 2)
# -- end class
class schoen_10_1_int:
    """
    schoen function of dimension 10 with 50 stationary points.
    Mixed integer version.
    """
    @classmethod
    def evaluate(cls, x):
        assert(len(x) == 10)
        numerator = 0.0
        denominator = 0.0
        dist = np.sum((x/10 - cls.z)**2, axis=1)
        for i in range(50):
            prod = 1.0
            for j in range(50):
                if (i != j):
                    prod *= dist[j]
            numerator += cls.f[i]*prod
            denominator += prod
        value = numerator/denominator
        return value
z = np.array( [[0.914871, 0.765230, 0.139426, 0.617466, 0.823635, 0.794003, 0.801171, 0.568811, 0.279434, 0.540422], [0.976983, 0.593277, 0.701115, 0.585262, 0.669106, 0.272906, 0.177127, 0.143389, 0.561181, 0.018744], [0.385208, 0.984106, 0.390066, 0.905970, 0.169600, 0.191291, 0.564157, 0.689910, 0.857031, 0.715390], [0.975998, 0.536904, 0.819333, 0.801793, 0.564454, 0.336124, 0.654190, 0.044197, 0.717416, 0.465807], [0.750519, 0.415284, 0.258927, 0.736115, 0.597744, 0.763716, 0.747691, 0.969633, 0.188117, 0.964954], [0.412888, 0.671756, 0.380214, 0.558595, 0.768370, 0.998320, 0.212183, 0.606757, 0.531315, 0.303569], [0.196682, 0.139879, 0.108608, 0.736975, 0.755971, 0.021390, 0.852398, 0.188596, 0.920133, 0.045012], [0.956270, 0.729258, 0.397664, 0.013146, 0.519861, 0.300011, 0.008396, 0.820346, 0.176841, 0.402298], [0.126432, 0.872346, 0.923581, 0.297492, 0.992744, 0.486525, 0.915493, 0.589980, 0.498242, 0.989945], [0.697409, 0.026641, 0.875467, 0.503039, 0.563285, 0.096769, 0.933643, 0.884419, 0.585825, 0.395465], [0.494783, 0.824300, 0.153326, 0.202651, 0.579815, 0.416954, 0.707624, 0.497959, 0.568876, 0.812841], [0.126963, 0.757337, 0.648583, 0.787445, 0.822586, 0.401155, 0.301350, 0.562707, 0.744074, 0.088372], [0.293611, 0.835864, 0.925111, 0.760322, 0.729456, 0.096840, 0.651466, 0.975836, 0.691353, 0.038384], [0.999250, 0.916829, 0.205699, 0.027241, 0.156956, 0.206598, 0.175242, 0.811219, 0.660192, 0.119865], [0.387978, 0.665180, 0.774376, 0.135223, 0.766238, 0.380668, 0.058279, 0.727506, 0.991527, 0.345759], [0.299341, 0.066231, 0.680305, 0.392230, 0.319985, 0.698292, 0.100236, 0.394973, 0.096232, 0.362943], [0.281548, 0.860858, 0.647870, 0.981650, 0.110777, 0.836484, 0.697387, 0.659942, 0.694425, 0.434991], [0.606706, 0.052287, 0.858208, 0.738885, 0.158495, 0.002367, 0.933796, 0.112986, 0.647308, 0.421573], [0.776505, 0.101364, 0.610406, 0.275033, 0.548409, 0.998967, 0.536743, 0.943903, 0.960993, 0.251672], [0.371347, 0.491122, 0.772374, 0.860206, 0.752131, 0.338591, 0.826739, 0.312111, 0.768881, 0.862719], [0.866886, 0.358220, 0.131205, 0.276334, 0.334111, 0.429525, 0.752197, 0.167524, 0.437764, 0.162916], [0.584246, 0.511215, 0.659647, 0.349220, 0.954428, 0.477982, 0.386041, 0.813944, 0.753530, 0.983276], [0.697327, 0.499835, 0.530487, 0.599958, 0.497257, 0.998852, 0.106262, 0.186978, 0.887481, 0.749174], [0.041611, 0.278918, 0.999095, 0.825221, 0.218320, 0.383711, 0.077041, 0.642061, 0.668906, 0.758298], [0.072437, 0.592862, 0.040655, 0.446330, 0.651659, 0.055738, 0.631924, 0.890039, 0.192989, 0.741054], [0.533886, 0.135079, 0.787647, 0.593408, 0.749228, 0.749045, 0.190386, 0.755508, 0.465321, 0.465156], [0.748843, 0.696419, 0.882124, 0.843895, 0.858057, 0.220107, 0.350310, 0.102947, 0.453576, 0.875940], [0.560231, 0.580247, 0.381834, 0.807535, 0.184636, 0.615702, 0.628408, 0.081783, 0.793384, 0.233639], [0.384827, 0.589138, 0.630013, 0.634506, 0.630712, 0.521293, 0.494486, 0.681700, 0.288512, 0.319808], [0.721978, 0.452289, 0.426726, 0.323106, 0.781584, 0.999325, 0.043670, 0.884560, 0.520936, 0.430684], [0.810388, 0.624041, 0.811624, 0.105973, 0.199807, 0.440644, 0.864152, 0.282280, 0.397116, 0.499932], [0.973889, 0.677797, 0.080137, 0.549098, 0.625445, 0.577342, 0.538642, 0.388039, 0.552273, 0.793807], [0.365176, 0.228017, 0.623500, 0.084450, 0.177343, 0.910108, 0.632719, 0.521458, 0.894843, 0.707893], [0.502069, 0.622312, 0.958019, 0.744999, 0.515695, 0.407885, 0.590739, 0.736542, 0.297555, 0.237955], [0.313835, 0.090014, 0.336274, 0.433171, 0.330864, 0.105751, 0.160367, 
0.651934, 0.207260, 0.293577], [0.886072, 0.592935, 0.498116, 0.321835, 0.011216, 0.543911, 0.506579, 0.216779, 0.406812, 0.261349], [0.789947, 0.881332, 0.696597, 0.742955, 0.252224, 0.718157, 0.188217, 0.371208, 0.178640, 0.347720], [0.482759, 0.663618, 0.622706, 0.036170, 0.278854, 0.088147, 0.482808, 0.134824, 0.028828, 0.944537], [0.184705, 0.662346, 0.917194, 0.186490, 0.918392, 0.955111, 0.636015, 0.447595, 0.813716, 0.372839], [0.231741, 0.637199, 0.745257, 0.201568, 0.697485, 0.897022, 0.239791, 0.495219, 0.153831, 0.387172], [0.198061, 0.194102, 0.550259, 0.751804, 0.503973, 0.034252, 0.788267, 0.731760, 0.118338, 0.057247], [0.068470, 0.545180, 0.668845, 0.714932, 0.688014, 0.203845, 0.146138, 0.109039, 0.470214, 0.441797], [0.085180, 0.142394, 0.938665, 0.071422, 0.946796, 0.697832, 0.472400, 0.161384, 0.325715, 0.122550], [0.637672, 0.986961, 0.969438, 0.989508, 0.381318, 0.800871, 0.012035, 0.326007, 0.459124, 0.645374], [0.147210, 0.954608, 0.361146, 0.094699, 0.092327, 0.301664, 0.478447, 0.008274, 0.680576, 0.004184], [0.768792, 0.812618, 0.915766, 0.029070, 0.506944, 0.457816, 0.839167, 0.024706, 0.990756, 0.088779], [0.872678, 0.601536, 0.948347, 0.621023, 0.415621, 0.289340, 0.291338, 0.190461, 0.664007, 0.583513], [0.6, 0.7, 0.0, 0.355500, 0.294700, 0.3, 0.5, 0.5, 0.759223, 0.508432], [0.7, 0.0, 0.4, 0.300586, 0.576927, 0.1, 0.2, 0.9, 0.614178, 0.092903], [0.7, 0.3, 0.7, 0.899419, 0.749499, 0.6, 0.6, 0.0, 0.973726, 0.168336]]) f = np.array( [799.1, 396.8, 370.3, 400.2, 239.7, 678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9, 579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8, 43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8, 555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3, 111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2, -1000, -1000, -1000]) dimension = 10 var_lower = np.array([0 for i in range(10)]) var_upper = np.array([10 for i in range(10)]) optimum_point = np.array([06., 07., 00., 03.55500, 02.94700, 03., 05., 05., 07.59223, 05.08432]) optimum_value = -1000 var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
[docs]class schoen_10_2_int: """ schoen function of dimension 10 with 50 stationary points. Mixed integer version. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==10) numerator = 0.0 denominator = 0.0 dist = np.sum((x/10 - cls.z)**2, axis=1) for i in range(50): prod = 1.0 for j in range(50): if (i != j): prod *= dist[j] numerator += cls.f[i]*prod denominator += prod value = numerator/denominator return(value)
z = np.array( [[0.131461, 0.965235, 0.046134, 0.983011, 0.719813, 0.827542, 0.662422, 0.570546, 0.578707, 0.013264], [0.068454, 0.682785, 0.582736, 0.434517, 0.310613, 0.869876, 0.993949, 0.629156, 0.590599, 0.356378], [0.632837, 0.961665, 0.015079, 0.378878, 0.805608, 0.685239, 0.528658, 0.752934, 0.717790, 0.374865], [0.286191, 0.912944, 0.400358, 0.902532, 0.324887, 0.850063, 0.483503, 0.764147, 0.147726, 0.159851], [0.303483, 0.754790, 0.090527, 0.653764, 0.164323, 0.402931, 0.593477, 0.448444, 0.711483, 0.113869], [0.057398, 0.302029, 0.596351, 0.565466, 0.694204, 0.974864, 0.323989, 0.298493, 0.859391, 0.238714], [0.139267, 0.214902, 0.608462, 0.297987, 0.499810, 0.578553, 0.548077, 0.208442, 0.046162, 0.246848], [0.680420, 0.783181, 0.828103, 0.475810, 0.680401, 0.188455, 0.015200, 0.650103, 0.762389, 0.063985], [0.409243, 0.600740, 0.302354, 0.588411, 0.436291, 0.294790, 0.701477, 0.994162, 0.433749, 0.535320], [0.077949, 0.530126, 0.869737, 0.387811, 0.705317, 0.632911, 0.442087, 0.082918, 0.441383, 0.591975], [0.622628, 0.054964, 0.020475, 0.145616, 0.163873, 0.321546, 0.282867, 0.743494, 0.750568, 0.732386], [0.538574, 0.066932, 0.225204, 0.290045, 0.613242, 0.529365, 0.384018, 0.946557, 0.974384, 0.425297], [0.108817, 0.850094, 0.886417, 0.161581, 0.082973, 0.506354, 0.589650, 0.638991, 0.045151, 0.688464], [0.917742, 0.365119, 0.484176, 0.173231, 0.210253, 0.303688, 0.992141, 0.023109, 0.977178, 0.535146], [0.183469, 0.198085, 0.511596, 0.275610, 0.753700, 0.437328, 0.986237, 0.028654, 0.767921, 0.997910], [0.484908, 0.759122, 0.577318, 0.359934, 0.935730, 0.617833, 0.770173, 0.311175, 0.004831, 0.157457], [0.634077, 0.236972, 0.016427, 0.261753, 0.349712, 0.245870, 0.412238, 0.523557, 0.985327, 0.094060], [0.477875, 0.803438, 0.496728, 0.848920, 0.497386, 0.938203, 0.279797, 0.287076, 0.395184, 0.980546], [0.450215, 0.193712, 0.975838, 0.103925, 0.077410, 0.709573, 0.253072, 0.311723, 0.885664, 0.204528], [0.557312, 0.815198, 0.097914, 0.539142, 0.826048, 0.130070, 0.049858, 0.223634, 0.076387, 0.831224], [0.927559, 0.324916, 0.563393, 0.209281, 0.344394, 0.953384, 0.298679, 0.890637, 0.966615, 0.380006], [0.026403, 0.997573, 0.479163, 0.379686, 0.687928, 0.832002, 0.214326, 0.348248, 0.073151, 0.062646], [0.726869, 0.911171, 0.961920, 0.874884, 0.216867, 0.076966, 0.776240, 0.495777, 0.963492, 0.425246], [0.357483, 0.486330, 0.759177, 0.748362, 0.889904, 0.350438, 0.232983, 0.823613, 0.792656, 0.441264], [0.875826, 0.359459, 0.214808, 0.425850, 0.493328, 0.456048, 0.523145, 0.504154, 0.090128, 0.472437], [0.813400, 0.808407, 0.427211, 0.902524, 0.210376, 0.490662, 0.915939, 0.169439, 0.078865, 0.485371], [0.877334, 0.982207, 0.679085, 0.486335, 0.940715, 0.585964, 0.289279, 0.694886, 0.172625, 0.201457], [0.141599, 0.476124, 0.762246, 0.067045, 0.411332, 0.813196, 0.134138, 0.302390, 0.856145, 0.349243], [0.346912, 0.082142, 0.787442, 0.857465, 0.371129, 0.448550, 0.967943, 0.775340, 0.943681, 0.656127], [0.619267, 0.547196, 0.470422, 0.141566, 0.584198, 0.952226, 0.196462, 0.629549, 0.685469, 0.824365], [0.014209, 0.789812, 0.836373, 0.186139, 0.493840, 0.710697, 0.910033, 0.368287, 0.865953, 0.140892], [0.482763, 0.072574, 0.026730, 0.143687, 0.739505, 0.419649, 0.013683, 0.662644, 0.785254, 0.234561], [0.821421, 0.844100, 0.153937, 0.671762, 0.290469, 0.631347, 0.591435, 0.498966, 0.043395, 0.176771], [0.404994, 0.496656, 0.951774, 0.497357, 0.715401, 0.023378, 0.493045, 0.342766, 0.117055, 0.698590], [0.985857, 0.831692, 0.423498, 0.215757, 0.341260, 0.790760, 0.941186, 
0.716883, 0.062641, 0.582012], [0.676905, 0.280897, 0.800638, 0.898913, 0.735995, 0.592412, 0.433021, 0.432772, 0.874477, 0.112375], [0.377382, 0.118941, 0.529204, 0.419434, 0.673891, 0.074904, 0.129868, 0.819585, 0.220536, 0.353223], [0.233415, 0.136703, 0.487256, 0.777498, 0.901915, 0.612402, 0.778635, 0.436718, 0.484520, 0.641969], [0.273297, 0.670196, 0.344525, 0.669751, 0.180230, 0.530085, 0.393284, 0.326043, 0.260840, 0.364690], [0.931213, 0.676123, 0.912481, 0.898258, 0.001887, 0.408306, 0.917215, 0.496959, 0.287951, 0.562511], [0.047196, 0.780338, 0.895994, 0.088169, 0.552425, 0.130790, 0.308504, 0.232476, 0.187952, 0.105936], [0.343517, 0.356222, 0.416018, 0.450278, 0.487765, 0.040510, 0.592363, 0.771635, 0.577849, 0.315843], [0.527759, 0.529503, 0.210423, 0.756794, 0.892670, 0.339374, 0.445837, 0.363265, 0.432114, 0.942045], [0.560107, 0.110906, 0.115725, 0.761393, 0.969105, 0.921166, 0.455014, 0.593512, 0.111887, 0.217300], [0.463382, 0.635591, 0.329484, 0.573602, 0.492558, 0.474174, 0.371906, 0.850465, 0.467637, 0.261373], [0.033051, 0.422543, 0.294155, 0.699026, 0.846231, 0.047967, 0.686826, 0.480273, 0.463181, 0.345601], [0.285473, 0.723925, 0.202386, 0.671909, 0.685277, 0.993969, 0.415329, 0.155218, 0.233826, 0.088752], [0.0, 0.6, 0.8, 0.677718, 0.961189, 0.2, 0.8, 0.8, 0.524970, 0.815489], [0.5, 0.5, 0.1, 0.156163, 0.274566, 0.5, 0.8, 0.8, 0.656166, 0.964211], [0.1, 0.9, 0.0, 0.178217, 0.408438, 0.2, 0.1, 0.2, 0.051758, 0.906712]]) f = np.array( [90.4, 830.9, 52.7, 375.2, 289.7, 244.1, 470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9, 11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3, 729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2, 568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4, 437.2, 199.7, 415.4, 966.0, 362.3, -1000, -1000, -1000]) dimension = 10 var_lower = np.array([0 for i in range(10)]) var_upper = np.array([10 for i in range(10)]) optimum_point = np.array([00., 06., 08., 06.77718, 09.61189, 02., 08., 08., 05.24970, 08.15489]) optimum_value = -1000 var_type = np.array(['I'] * 3 + ['R'] * 2 + ['I'] * 3 + ['R'] * 2)
# -- end class
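Both schoen_10_*_int classes above evaluate a Schoen-type rational interpolant: the value at a point is a weighted average of the stored f values, where node i gets weight equal to the product of the squared distances to all other nodes, so the global minimum of -1000 is attained at one of the nodes whose f value is -1000. A minimal vectorized sketch of the same computation (schoen_rational is a hypothetical helper, not part of this module; it assumes the scaled point does not coincide with any row of z, since it divides by the squared distances):

import numpy as np

def schoen_rational(x, z, f):
    # x: point already scaled to [0, 1]^n (the classes above pass x/10,
    #    since their variables range over [0, 10] while z is stored in [0, 1]);
    # z: (k, n) array of nodes; f: (k,) array of values at the nodes.
    dist = np.sum((x - z)**2, axis=1)
    # prod_{j != i} dist_j, computed as (prod_j dist_j) / dist_i.
    weights = np.prod(dist) / dist
    return np.dot(f, weights) / np.sum(weights)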
[docs]class branin_cat: """ Branin function of the Dixon-Szego test set, with categorical vars. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==3) if (x[2] == 0): fun = lambda x : np.cos(x) elif (x[2] == 1): fun = lambda x : np.sin(x) elif (x[2] == 2): fun = lambda x : (np.cos(x + np.pi/4))**2 elif (x[2] == 3): fun = lambda x : (np.sin(x + np.pi/4))**2 value = ((x[1] - (5.1/(4*np.pi*np.pi))*x[0]*x[0] + 5/np.pi*x[0] - 6)**2 + 10*(1-1/(8*np.pi)) * fun(x[0]) +10) return(value)
dimension = 3 var_lower = np.array([-5, 0, 0]) var_upper = np.array([10, 15, 3]) optimum_point = np.array([9.42477796, 2.47499998, 0]) additional_optima = np.array([ [-3.14159265, 12.27500000, 0], [3.14159265, 2.27500000, 0] ]) optimum_value = 0.397887357729739 var_type = np.array(['R', 'R', 'C'])
# -- end class
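A quick sanity check for the class above (a sketch; it assumes the module is importable as rbfopt.rbfopt_test_functions): with the categorical coordinate equal to 0 the cosine branch is selected, so branin_cat reduces to the standard Branin function and its stored optimum should evaluate to roughly 0.3979.

from rbfopt.rbfopt_test_functions import branin_cat

x = branin_cat.optimum_point           # [9.42477796, 2.47499998, 0]
print(branin_cat.evaluate(x))          # approximately 0.397887 (= optimum_value)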
[docs]class hartman3_cat: """ Hartman3 function of the Dixon-Szego test set, with categorical vars. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==4) value = -math.fsum([cls.c[int(x[3]), i] * np.exp(-math.fsum([cls.a[j][i]* (x[j] - cls.p[j][i])**2 for j in range(3)])) for i in range(4) ]) return(value)
a = np.array([ [3.0, 0.1, 3.0, 0.1], [10.0, 10.0, 10.0, 10.0], [30.0, 35.0, 30.0, 35.0] ]) p = np.array([ [0.36890, 0.46990, 0.10910, 0.03815], [0.11700, 0.43870, 0.87320, 0.57430], [0.26730, 0.74700, 0.55470, 0.88280] ]) c = np.array([[3.2, 2.5, 0.2, 0.7], [1.0, 1.2, 3.0, 3.2], [2.2, 1.2, 3.1, 1.7], [0.1, 2.1, 0.3, 3.7], [1.8, 0.4, 3.1, 2.4]]) dimension = 4 var_lower = np.array([0, 0, 0, 0]) var_upper = np.array([1, 1, 1, 4]) optimum_point = np.array([0.155995, 0.536521, 0.843994, 3]) optimum_value = -4.822787424687719 var_type = np.array(['R', 'R', 'R', 'C'])
# -- end class
[docs]class hartman6_cat: """ Hartman6 function of the Dixon-Szego test set, with categorical vars. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==7) value = -math.fsum([cls.c[int(x[0]), i] * np.exp(-math.fsum([cls.a[j][i]* (x[j+1] - cls.p[j][i])**2 for j in range(6)])) for i in range(4) ]) return(value)
a = np.array([ [10.00, 0.05, 3.00, 17.00], [3.00, 10.00, 3.50, 8.00], [17.00, 17.00, 1.70, 0.05], [3.50, 0.10, 10.00, 10.00], [1.70, 8.00, 17.00, 0.10], [8.00, 14.00, 8.00, 14.00] ]) p = np.array([ [0.1312, 0.2329, 0.2348, 0.4047], [0.1696, 0.4135, 0.1451, 0.8828], [0.5569, 0.8307, 0.3522, 0.8732], [0.0124, 0.3736, 0.2883, 0.5743], [0.8283, 0.1004, 0.3047, 0.1091], [0.5886, 0.9991, 0.6650, 0.0381] ]) c = np.array([[1.0, 1.2, 3.0, 3.2], [3.2, 2.5, 0.2, 0.7], [2.2, 1.2, 3.1, 1.7], [0.1, 2.1, 0.3, 3.7], [1.8, 0.4, 3.1, 2.4]]) dimension = 7 var_lower = np.array([0, 0, 0, 0, 0, 0, 0]) var_upper = np.array([4, 1, 1, 1, 1, 1, 1]) optimum_point = np.array([2, 0.177401, 0.153512, 0.516698, 0.256499, 0.323092, 0.646352]) optimum_value = -3.96231691936822 var_type = np.array(['C'] + ['R'] * 6)
# -- end class
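In both hartman3_cat and hartman6_cat the categorical coordinate selects a row k of the coefficient matrix c, and the remaining continuous coordinates are evaluated with the usual Hartman form:

.. math::
   f(x) = -\sum_{i=1}^{4} c_{k,i} \exp\left(-\sum_{j=1}^{n} a_{j,i} (x_j - p_{j,i})^2\right),

with n = 3 or n = 6; the categorical coordinate is the last variable for hartman3_cat and the first variable for hartman6_cat.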
[docs]class ex8_1_1_cat: """ ex8_1_1 function of the GlobalLib test set, with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==4) if (x[2] == 0): fun1 = lambda x : np.sin(x) elif (x[2] == 1): fun1 = lambda x : np.cos(x) elif (x[2] == 2): fun1 = lambda x : (np.cos(x + np.pi/4))**2 elif (x[2] == 3): fun1 = lambda x : (np.sin(x + np.pi/4))**2 if (x[3] == 0): fun2 = lambda x : (np.sin(x + np.pi/4))**2 elif (x[3] == 1): fun2 = lambda x : np.sin(x) elif (x[3] == 2): fun2 = lambda x : (np.cos(x + np.pi/4))**2 elif (x[3] == 3): fun2 = lambda x : np.cos(x) value = fun1(x[0])*fun2(x[1]) - x[0]/(x[1]**2+1) return(value)
dimension = 4 var_lower = np.array([-1, -1, 0, 0]) var_upper = np.array([2, 1, 3, 3]) optimum_point = np.array([2.0, -0.00030, 1, 3]) optimum_value = -2.4161466378205514 var_type = np.array(['R'] * 2 + ['C', 'C'])
# -- end class
[docs]class schoen_10_1_cat: """ schoen function of dimension 10 with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==12) numerator = 0.0 denominator = 0.0 # Categorical variable determining function for the first 25 # points. We want these functions to be nonnegative on # distances. if (x[0] == 0): fun1 = lambda x : x**2 elif (x[0] == 1): fun1 = lambda x : np.abs(x + 50) elif (x[0] == 2): fun1 = lambda x : np.log(x + 10) elif (x[0] == 3): fun1 = lambda x : (np.sin(x))**2 if (x[1] == 0): fun2 = lambda x : np.log(x + 10) elif (x[1] == 1): fun2 = lambda x : np.abs(x + 50) elif (x[1] == 2): fun2 = lambda x : x**2 elif (x[1] == 3): fun2 = lambda x : (np.sin(x))**2 dist1 = np.sum(fun1(x[2:] - cls.z), axis=1) dist2 = np.sum(fun2(x[2:] - cls.z), axis=1) for i in range(25): prod = 1.0 for j in range(50): if (i != j): prod *= dist1[j] numerator += cls.f[i]*prod denominator += prod for i in range(25, 50): prod = 1.0 for j in range(50): if (i != j): prod *= dist2[j] numerator += cls.f[i]*prod denominator += prod value = numerator/denominator return(value)
z = np.array( [[0.914871, 0.765230, 0.139426, 0.617466, 0.823635, 0.794003, 0.801171, 0.568811, 0.279434, 0.540422], [0.976983, 0.593277, 0.701115, 0.585262, 0.669106, 0.272906, 0.177127, 0.143389, 0.561181, 0.018744], [0.385208, 0.984106, 0.390066, 0.905970, 0.169600, 0.191291, 0.564157, 0.689910, 0.857031, 0.715390], [0.975998, 0.536904, 0.819333, 0.801793, 0.564454, 0.336124, 0.654190, 0.044197, 0.717416, 0.465807], [0.750519, 0.415284, 0.258927, 0.736115, 0.597744, 0.763716, 0.747691, 0.969633, 0.188117, 0.964954], [0.412888, 0.671756, 0.380214, 0.558595, 0.768370, 0.998320, 0.212183, 0.606757, 0.531315, 0.303569], [0.196682, 0.139879, 0.108608, 0.736975, 0.755971, 0.021390, 0.852398, 0.188596, 0.920133, 0.045012], [0.956270, 0.729258, 0.397664, 0.013146, 0.519861, 0.300011, 0.008396, 0.820346, 0.176841, 0.402298], [0.126432, 0.872346, 0.923581, 0.297492, 0.992744, 0.486525, 0.915493, 0.589980, 0.498242, 0.989945], [0.697409, 0.026641, 0.875467, 0.503039, 0.563285, 0.096769, 0.933643, 0.884419, 0.585825, 0.395465], [0.494783, 0.824300, 0.153326, 0.202651, 0.579815, 0.416954, 0.707624, 0.497959, 0.568876, 0.812841], [0.126963, 0.757337, 0.648583, 0.787445, 0.822586, 0.401155, 0.301350, 0.562707, 0.744074, 0.088372], [0.293611, 0.835864, 0.925111, 0.760322, 0.729456, 0.096840, 0.651466, 0.975836, 0.691353, 0.038384], [0.999250, 0.916829, 0.205699, 0.027241, 0.156956, 0.206598, 0.175242, 0.811219, 0.660192, 0.119865], [0.387978, 0.665180, 0.774376, 0.135223, 0.766238, 0.380668, 0.058279, 0.727506, 0.991527, 0.345759], [0.299341, 0.066231, 0.680305, 0.392230, 0.319985, 0.698292, 0.100236, 0.394973, 0.096232, 0.362943], [0.281548, 0.860858, 0.647870, 0.981650, 0.110777, 0.836484, 0.697387, 0.659942, 0.694425, 0.434991], [0.606706, 0.052287, 0.858208, 0.738885, 0.158495, 0.002367, 0.933796, 0.112986, 0.647308, 0.421573], [0.776505, 0.101364, 0.610406, 0.275033, 0.548409, 0.998967, 0.536743, 0.943903, 0.960993, 0.251672], [0.371347, 0.491122, 0.772374, 0.860206, 0.752131, 0.338591, 0.826739, 0.312111, 0.768881, 0.862719], [0.866886, 0.358220, 0.131205, 0.276334, 0.334111, 0.429525, 0.752197, 0.167524, 0.437764, 0.162916], [0.584246, 0.511215, 0.659647, 0.349220, 0.954428, 0.477982, 0.386041, 0.813944, 0.753530, 0.983276], [0.697327, 0.499835, 0.530487, 0.599958, 0.497257, 0.998852, 0.106262, 0.186978, 0.887481, 0.749174], [0.041611, 0.278918, 0.999095, 0.825221, 0.218320, 0.383711, 0.077041, 0.642061, 0.668906, 0.758298], [0.072437, 0.592862, 0.040655, 0.446330, 0.651659, 0.055738, 0.631924, 0.890039, 0.192989, 0.741054], [0.533886, 0.135079, 0.787647, 0.593408, 0.749228, 0.749045, 0.190386, 0.755508, 0.465321, 0.465156], [0.748843, 0.696419, 0.882124, 0.843895, 0.858057, 0.220107, 0.350310, 0.102947, 0.453576, 0.875940], [0.560231, 0.580247, 0.381834, 0.807535, 0.184636, 0.615702, 0.628408, 0.081783, 0.793384, 0.233639], [0.384827, 0.589138, 0.630013, 0.634506, 0.630712, 0.521293, 0.494486, 0.681700, 0.288512, 0.319808], [0.721978, 0.452289, 0.426726, 0.323106, 0.781584, 0.999325, 0.043670, 0.884560, 0.520936, 0.430684], [0.810388, 0.624041, 0.811624, 0.105973, 0.199807, 0.440644, 0.864152, 0.282280, 0.397116, 0.499932], [0.973889, 0.677797, 0.080137, 0.549098, 0.625445, 0.577342, 0.538642, 0.388039, 0.552273, 0.793807], [0.365176, 0.228017, 0.623500, 0.084450, 0.177343, 0.910108, 0.632719, 0.521458, 0.894843, 0.707893], [0.502069, 0.622312, 0.958019, 0.744999, 0.515695, 0.407885, 0.590739, 0.736542, 0.297555, 0.237955], [0.313835, 0.090014, 0.336274, 0.433171, 0.330864, 0.105751, 0.160367, 
0.651934, 0.207260, 0.293577], [0.886072, 0.592935, 0.498116, 0.321835, 0.011216, 0.543911, 0.506579, 0.216779, 0.406812, 0.261349], [0.789947, 0.881332, 0.696597, 0.742955, 0.252224, 0.718157, 0.188217, 0.371208, 0.178640, 0.347720], [0.482759, 0.663618, 0.622706, 0.036170, 0.278854, 0.088147, 0.482808, 0.134824, 0.028828, 0.944537], [0.184705, 0.662346, 0.917194, 0.186490, 0.918392, 0.955111, 0.636015, 0.447595, 0.813716, 0.372839], [0.231741, 0.637199, 0.745257, 0.201568, 0.697485, 0.897022, 0.239791, 0.495219, 0.153831, 0.387172], [0.198061, 0.194102, 0.550259, 0.751804, 0.503973, 0.034252, 0.788267, 0.731760, 0.118338, 0.057247], [0.068470, 0.545180, 0.668845, 0.714932, 0.688014, 0.203845, 0.146138, 0.109039, 0.470214, 0.441797], [0.085180, 0.142394, 0.938665, 0.071422, 0.946796, 0.697832, 0.472400, 0.161384, 0.325715, 0.122550], [0.637672, 0.986961, 0.969438, 0.989508, 0.381318, 0.800871, 0.012035, 0.326007, 0.459124, 0.645374], [0.147210, 0.954608, 0.361146, 0.094699, 0.092327, 0.301664, 0.478447, 0.008274, 0.680576, 0.004184], [0.768792, 0.812618, 0.915766, 0.029070, 0.506944, 0.457816, 0.839167, 0.024706, 0.990756, 0.088779], [0.872678, 0.601536, 0.948347, 0.621023, 0.415621, 0.289340, 0.291338, 0.190461, 0.664007, 0.583513], [0.641216, 0.700152, 0.080576, 0.355500, 0.294700, 0.338614, 0.563964, 0.528079, 0.759223, 0.508432], [0.738489, 0.077376, 0.429485, 0.300586, 0.576927, 0.185931, 0.231659, 0.954833, 0.614178, 0.092903], [0.729321, 0.318607, 0.768657, 0.899419, 0.749499, 0.623403, 0.671793, 0.052835, 0.973726, 0.168336]]) f = np.array( [-1000, -1000, -1000, 799.1, 396.8, 370.3, 400.2, 239.7, 678.8, 868.9, 564.4, 681.6, 153.0, 760.7, 562.9, 434.9, 579.2, 260.6, 88.5, 601.3, 754.8, 894.8, 672.8, 633.7, 921.8, 43.2, 286.2, 945.5, 716.0, 72.7, 631.2, 640.3, 425.1, 825.8, 555.8, 136.9, 805.7, 786.5, 400.0, 856.4, 548.0, 510.8, 52.3, 111.6, 686.6, 888.2, 315.4, 333.9, 61.5, 755.2]) dimension = 12 var_lower = np.array([0, 0] + [0 for i in range(10)]) var_upper = np.array([3, 3] + [1 for i in range(10)]) optimum_point = np.array([0, 2, 0.914871, 0.765230, 0.139426, 0.617466, 0.823635, 0.794003, 0.801171, 0.568811, 0.279434, 0.540422]) optimum_value = -1000 var_type = np.array(['C', 'C'] + ['R'] * 10)
# -- end class
[docs]class schoen_10_2_cat: """ schoen function of dimension 10 with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==12) numerator = 0.0 denominator = 0.0 # Categorical variable determining function for the first 25 # points. We want these functions to be nonnegative on # distances. if (x[0] == 0): fun1 = lambda x : x**2 elif (x[0] == 1): fun1 = lambda x : np.abs(x + 50) elif (x[0] == 2): fun1 = lambda x : np.log(x + 10) elif (x[0] == 3): fun1 = lambda x : (np.sin(x))**2 if (x[1] == 0): fun2 = lambda x : np.log(x + 10) elif (x[1] == 1): fun2 = lambda x : np.abs(x + 50) elif (x[1] == 2): fun2 = lambda x : x**2 elif (x[1] == 3): fun2 = lambda x : (np.sin(x))**2 dist1 = np.sum(fun1(x[2:] - cls.z), axis=1) dist2 = np.sum(fun2(x[2:] - cls.z), axis=1) for i in range(25): prod = 1.0 for j in range(50): if (i != j): prod *= dist1[j] numerator += cls.f[i]*prod denominator += prod for i in range(25, 50): prod = 1.0 for j in range(50): if (i != j): prod *= dist2[j] numerator += cls.f[i]*prod denominator += prod value = numerator/denominator return(value)
z = np.array( [[0.131461, 0.965235, 0.046134, 0.983011, 0.719813, 0.827542, 0.662422, 0.570546, 0.578707, 0.013264], [0.068454, 0.682785, 0.582736, 0.434517, 0.310613, 0.869876, 0.993949, 0.629156, 0.590599, 0.356378], [0.632837, 0.961665, 0.015079, 0.378878, 0.805608, 0.685239, 0.528658, 0.752934, 0.717790, 0.374865], [0.286191, 0.912944, 0.400358, 0.902532, 0.324887, 0.850063, 0.483503, 0.764147, 0.147726, 0.159851], [0.303483, 0.754790, 0.090527, 0.653764, 0.164323, 0.402931, 0.593477, 0.448444, 0.711483, 0.113869], [0.057398, 0.302029, 0.596351, 0.565466, 0.694204, 0.974864, 0.323989, 0.298493, 0.859391, 0.238714], [0.139267, 0.214902, 0.608462, 0.297987, 0.499810, 0.578553, 0.548077, 0.208442, 0.046162, 0.246848], [0.680420, 0.783181, 0.828103, 0.475810, 0.680401, 0.188455, 0.015200, 0.650103, 0.762389, 0.063985], [0.409243, 0.600740, 0.302354, 0.588411, 0.436291, 0.294790, 0.701477, 0.994162, 0.433749, 0.535320], [0.077949, 0.530126, 0.869737, 0.387811, 0.705317, 0.632911, 0.442087, 0.082918, 0.441383, 0.591975], [0.622628, 0.054964, 0.020475, 0.145616, 0.163873, 0.321546, 0.282867, 0.743494, 0.750568, 0.732386], [0.538574, 0.066932, 0.225204, 0.290045, 0.613242, 0.529365, 0.384018, 0.946557, 0.974384, 0.425297], [0.108817, 0.850094, 0.886417, 0.161581, 0.082973, 0.506354, 0.589650, 0.638991, 0.045151, 0.688464], [0.917742, 0.365119, 0.484176, 0.173231, 0.210253, 0.303688, 0.992141, 0.023109, 0.977178, 0.535146], [0.183469, 0.198085, 0.511596, 0.275610, 0.753700, 0.437328, 0.986237, 0.028654, 0.767921, 0.997910], [0.484908, 0.759122, 0.577318, 0.359934, 0.935730, 0.617833, 0.770173, 0.311175, 0.004831, 0.157457], [0.634077, 0.236972, 0.016427, 0.261753, 0.349712, 0.245870, 0.412238, 0.523557, 0.985327, 0.094060], [0.477875, 0.803438, 0.496728, 0.848920, 0.497386, 0.938203, 0.279797, 0.287076, 0.395184, 0.980546], [0.450215, 0.193712, 0.975838, 0.103925, 0.077410, 0.709573, 0.253072, 0.311723, 0.885664, 0.204528], [0.557312, 0.815198, 0.097914, 0.539142, 0.826048, 0.130070, 0.049858, 0.223634, 0.076387, 0.831224], [0.927559, 0.324916, 0.563393, 0.209281, 0.344394, 0.953384, 0.298679, 0.890637, 0.966615, 0.380006], [0.026403, 0.997573, 0.479163, 0.379686, 0.687928, 0.832002, 0.214326, 0.348248, 0.073151, 0.062646], [0.726869, 0.911171, 0.961920, 0.874884, 0.216867, 0.076966, 0.776240, 0.495777, 0.963492, 0.425246], [0.357483, 0.486330, 0.759177, 0.748362, 0.889904, 0.350438, 0.232983, 0.823613, 0.792656, 0.441264], [0.875826, 0.359459, 0.214808, 0.425850, 0.493328, 0.456048, 0.523145, 0.504154, 0.090128, 0.472437], [0.813400, 0.808407, 0.427211, 0.902524, 0.210376, 0.490662, 0.915939, 0.169439, 0.078865, 0.485371], [0.877334, 0.982207, 0.679085, 0.486335, 0.940715, 0.585964, 0.289279, 0.694886, 0.172625, 0.201457], [0.141599, 0.476124, 0.762246, 0.067045, 0.411332, 0.813196, 0.134138, 0.302390, 0.856145, 0.349243], [0.346912, 0.082142, 0.787442, 0.857465, 0.371129, 0.448550, 0.967943, 0.775340, 0.943681, 0.656127], [0.619267, 0.547196, 0.470422, 0.141566, 0.584198, 0.952226, 0.196462, 0.629549, 0.685469, 0.824365], [0.014209, 0.789812, 0.836373, 0.186139, 0.493840, 0.710697, 0.910033, 0.368287, 0.865953, 0.140892], [0.482763, 0.072574, 0.026730, 0.143687, 0.739505, 0.419649, 0.013683, 0.662644, 0.785254, 0.234561], [0.821421, 0.844100, 0.153937, 0.671762, 0.290469, 0.631347, 0.591435, 0.498966, 0.043395, 0.176771], [0.404994, 0.496656, 0.951774, 0.497357, 0.715401, 0.023378, 0.493045, 0.342766, 0.117055, 0.698590], [0.985857, 0.831692, 0.423498, 0.215757, 0.341260, 0.790760, 0.941186, 
0.716883, 0.062641, 0.582012], [0.676905, 0.280897, 0.800638, 0.898913, 0.735995, 0.592412, 0.433021, 0.432772, 0.874477, 0.112375], [0.377382, 0.118941, 0.529204, 0.419434, 0.673891, 0.074904, 0.129868, 0.819585, 0.220536, 0.353223], [0.233415, 0.136703, 0.487256, 0.777498, 0.901915, 0.612402, 0.778635, 0.436718, 0.484520, 0.641969], [0.273297, 0.670196, 0.344525, 0.669751, 0.180230, 0.530085, 0.393284, 0.326043, 0.260840, 0.364690], [0.931213, 0.676123, 0.912481, 0.898258, 0.001887, 0.408306, 0.917215, 0.496959, 0.287951, 0.562511], [0.047196, 0.780338, 0.895994, 0.088169, 0.552425, 0.130790, 0.308504, 0.232476, 0.187952, 0.105936], [0.343517, 0.356222, 0.416018, 0.450278, 0.487765, 0.040510, 0.592363, 0.771635, 0.577849, 0.315843], [0.527759, 0.529503, 0.210423, 0.756794, 0.892670, 0.339374, 0.445837, 0.363265, 0.432114, 0.942045], [0.560107, 0.110906, 0.115725, 0.761393, 0.969105, 0.921166, 0.455014, 0.593512, 0.111887, 0.217300], [0.463382, 0.635591, 0.329484, 0.573602, 0.492558, 0.474174, 0.371906, 0.850465, 0.467637, 0.261373], [0.033051, 0.422543, 0.294155, 0.699026, 0.846231, 0.047967, 0.686826, 0.480273, 0.463181, 0.345601], [0.285473, 0.723925, 0.202386, 0.671909, 0.685277, 0.993969, 0.415329, 0.155218, 0.233826, 0.088752], [0.029705, 0.651519, 0.813239, 0.677718, 0.961189, 0.285385, 0.824635, 0.837670, 0.524970, 0.815489], [0.519627, 0.508274, 0.141067, 0.156163, 0.274566, 0.536322, 0.834749, 0.852042, 0.656166, 0.964211], [0.119675, 0.971352, 0.052983, 0.178217, 0.408438, 0.215091, 0.102098, 0.256312, 0.051758, 0.906712]]) f = np.array( [-1000, -1000, -1000, 90.4, 830.9, 52.7, 375.2, 289.7, 244.1, 470.2, 111.7, 968.9, 903.4, 918.5, 820.3, 441.2, 687.5, 836.9, 11.0, 454.5, 929.3, 952.6, 937.2, 870.5, 211.7, 378.4, 320.3, 729.6, 420.8, 213.8, 717.7, 285.4, 522.8, 748.3, 371.0, 501.2, 568.6, 111.9, 645.2, 486.2, 157.0, 968.5, 137.6, 127.2, 943.4, 437.2, 199.7, 415.4, 966.0, 362.3]) dimension = 12 var_lower = np.array([0, 0] + [0 for i in range(10)]) var_upper = np.array([3, 3] + [1 for i in range(10)]) optimum_point = np.array([0, 2, 0.131461, 0.965235, 0.046134, 0.983011, 0.719813, 0.827542, 0.662422, 0.570546, 0.578707, 0.013264]) optimum_value = -1000 var_type = np.array(['C', 'C'] + ['R'] * 10)
# -- end class
[docs]class gear4_cat: """ gear4 function of the MINLPLib test set, with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==6) if (x[5] == 0): fun = lambda x, y : np.sqrt(x + y) elif (x[5] == 1): fun = lambda x, y : x/y elif (x[5] == 2): fun = lambda x, y : np.log(x * y * 2) elif (x[5] == 3): fun = lambda x, y : x/(y + 10) elif (x[5] == 4): fun = lambda x, y : np.maximum(x, y) value = -1000000*x[0]*x[1]/(x[2]*x[3]) + 2*x[4] + 144279.32477276 # There is a constraint: # -1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276 >= 0 penalty = 10*max(0,-(-1000000*x[0]*x[1]/(x[2]*x[3]) + x[4] + 144279.32477276)) return(value + penalty)
dimension = 6 var_lower = np.array([12, 12, 12, 12, 0, 0]) var_upper = np.array([60, 60, 60, 60, 100, 4]) optimum_point = np.array([19.0, 16.0, 43.0, 49.0, 1.64342847396619, 1]) optimum_value = 1.6434284739 var_type = np.array(['I'] * 4 + ['R'] + ['C'])
# -- end class
[docs]class nvs07_cat: """ nvs07 function of the MINLPLib test set, with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==4) if (x[3] == 2): fun = lambda x : (x-1)**3 elif (x[3] == 3): fun = lambda x : np.exp(x/50) elif (x[3] == 4): fun = lambda x : x**2 value = 2*fun(x[1]) + x[0] + 5*x[2] # There are two constraints: # x[2]**2 * x[1] + 5*x[2] + 3*x[0] - 10 >= 0 # x[0] - x[2] - 2.66 >= 0 penalty = 0.0 penalty += 10*max(0, -(fun(x[2]) * x[1] + 5*x[2] + 3*x[0] - 10)) penalty += 10*max(0, -(x[0] - x[2] - 2.66)) return(value + penalty)
dimension = 4 var_lower = np.array([0, 0, 0, 2]) var_upper = np.array([200, 200, 200, 4]) optimum_point = np.array([4.0, 0.0, 0.0, 2]) optimum_value = 2.0 var_type = np.array(['I'] * 3 + ['C'])
# -- end class
[docs]class nvs09_cat: """ nvs09 function of the MINLPLib test set, with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==11) if (x[10] == 0): fun = np.sqrt elif (x[10] == 1): fun = np.abs elif (x[10] == 2): fun = lambda x : 1/(x-0.9) elif (x[10] == 3): fun = np.log elif (x[10] == 4): fun = lambda x : x - 2 value = ((fun(x[0] - 2))**2 + (fun(10 - x[0]))**2 + (fun(x[1] - 2))**2 + (fun(10 - x[1]))**2 + (fun(x[2] - 2))**2 + (fun(10 - x[2]))**2 + (fun(x[3] - 2))**2 + (fun(10 - x[3]))**2 + (fun(x[4] - 2))**2 + (fun(10 - x[4]))**2 + (fun(x[5] - 2))**2 + (fun(10 - x[5]))**2 + (fun(x[6] - 2))**2 + (fun(10 - x[6]))**2 + (fun(x[7] - 2))**2 + (fun(10 - x[7]))**2 + (fun(x[8] - 2))**2 + (fun(10 - x[8]))**2 + (fun(x[9] - 2))**2 + (fun(10 - x[9]))**2 - (x[0]*x[1]*x[2]*x[3]*x[4]*x[5]*x[6]*x[7]*x[8]*x[9])**0.2) return(value)
dimension = 11 var_lower = np.array([3 for i in range(10)] + [0]) var_upper = np.array([9 for i in range(10)] + [4]) optimum_point = np.array([8, 8, 8, 8, 7, 8, 7, 8, 8, 8, 2]) optimum_value = -53.179649471788274 var_type = np.array(['I'] * 10 + ['C'])
# -- end class
[docs]class st_miqp1_cat: """ st_miqp1 function of the MINLPLib test set, with categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==6) value = (50*x[1]*x[1] + 42*x[1] + 50*x[2]*x[2] + 44*x[2] + 50*x[3]*x[3] + 45*x[3] + 50*x[4]*x[4] + 47*x[4] + 50*x[5]*x[5] + 47.5*x[5]) # There is one constraint, with the coefficient row selected by the # categorical variable x[0]: # sum_j c[x[0]][j]*x[j+1] - c[x[0]][5] >= 0 # (for x[0] == 0 this is 20*x[1] + 12*x[2] + 11*x[3] + 7*x[4] + 4*x[5] - 40 >= 0) penalty = 100*max(0, -sum(cls.c[int(x[0])][j]*x[j+1] for j in range(5)) + cls.c[int(x[0])][-1]) return(value + penalty)
c = np.array([[20, 12, 11, 7, 4, 40], [8, 23, 12, 10, 10, 35], [12, 25, 30, 4, 22, 60], [18, 3, 15, 33, 25, 45], [27, 11, 9, 20, 13, 32], [9, 31, 25, 9, 27, 42], [14, 23, 18, 12, 33, 37]]) dimension = 6 var_lower = np.array([0, 0, 0, 0, 0, 0]) var_upper = np.array([5, 1, 1, 1, 1, 1]) optimum_point = np.array([4, 1.0, 1.0, 0.0, 0.0, 0.0]) optimum_value = 186.0 var_type = np.array(['C'] + ['I'] * 5)
# -- end class
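The three MINLPLib-derived classes above (gear4_cat, nvs07_cat, st_miqp1_cat) turn constrained problems into box-constrained ones by adding a linear penalty for violation of each constraint g(x) >= 0, with a weight of 10 or 100. A generic sketch of that pattern (penalized_value is a hypothetical helper, not part of this module):

def penalized_value(objective, constraints, x, weight=10.0):
    # constraints: callables g for which feasibility means g(x) >= 0.
    # Each violated constraint adds weight * |g(x)| to the objective.
    value = objective(x)
    for g in constraints:
        value += weight * max(0.0, -g(x))
    return value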
[docs]class schaeffer_f7_12_1_int_cat: """ Schaeffer F7 function with integer and categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==13) if (x[12] == 0): fun = lambda x : np.cos(x) elif (x[12] == 1): fun = lambda x : np.sin(x) elif (x[12] == 2): fun = lambda x : (np.cos(x + np.pi/4))**2 elif (x[12] == 3): fun = lambda x : (np.sin(x + np.pi/4))**2 value = 0 normalizer = 1.0/float(len(x)-2) for i in range(len(x)-2): si = 2**i*np.sqrt((x[i]-cls.optimum_point[i])**2 + (x[i+1]-cls.optimum_point[i+1])**2) value += (normalizer * np.sqrt(si) * (fun(50*si**0.20) + 1))**2 return value - 10
dimension = 13 var_lower = np.array([-50 for i in range(12)] + [0]) var_upper = np.array([50 for i in range(12)] + [3]) optimum_point = np.array([-34.32567, -34.98896, 07.69262, 30.3388, -48.24371, 23.18355, 24.93374, 32.07436, 46.86153, 4, 25, -16, 1]) optimum_value = -10 var_type = np.array(['R'] * 9 + ['I'] * 3 + ['C'])
# -- end class
[docs]class schaeffer_f7_12_2_int_cat: """ Schaeffer F7 function with integer and categorical variables. """
[docs] @classmethod def evaluate(cls, x): assert(len(x)==13) if (x[12] == 0): fun = lambda x : np.cos(x) elif (x[12] == 1): fun = lambda x : np.sin(x) elif (x[12] == 2): fun = lambda x : (np.cos(x + np.pi/4))**2 elif (x[12] == 3): fun = lambda x : (np.sin(x + np.pi/4))**2 value = 0 normalizer = 1.0/float(len(x)-2) for i in range(len(x)-2): si = 2**i*np.sqrt((x[i]-cls.optimum_point[i])**2 + (x[i+1]-cls.optimum_point[i+1])**2) value += (normalizer * np.sqrt(si) * (fun(50*si**0.20) + 1))**2 return value - 10
dimension = 13 var_lower = np.array([-50 for i in range(12)] + [0]) var_upper = np.array([50 for i in range(12)] + [3]) optimum_point = np.array([-08.214, 30.69133, 48.26095, -04.94219, 15.15357, 00.4841, -13.54025, -40.78766, -16.02916, 16, 39, -49, 1]) optimum_value = -10 var_type = np.array(['R'] * 9 + ['I'] * 3 + ['C'])
# -- end class
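For reference, the two schaeffer_f7_12_*_int_cat classes above compute (writing x* for the stored optimum_point and fun for the oscillator selected by the categorical coordinate x[12]):

.. math::
   f(x) = -10 + \sum_{i=0}^{10} \left(\frac{\sqrt{s_i}}{11}\,\bigl(\mathrm{fun}(50\,s_i^{0.2}) + 1\bigr)\right)^2,
   \qquad
   s_i = 2^i \sqrt{(x_i - x^*_i)^2 + (x_{i+1} - x^*_{i+1})^2},

so every s_i vanishes when the first twelve coordinates match x*, and the value there is the stored optimum_value of -10.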
[docs]class TestBlackBox(RbfoptBlackBox): """A black-box constructed from a known test function. Parameters ---------- name : string The name of the function to be implemented. """ def __init__(self, name): """Constructor. """ try: thismodule = sys.modules[__name__] self._function = getattr(thismodule, name.lower()) except AttributeError: raise ValueError('Function ' + name + ' not implemented')
[docs] def get_dimension(self): return self._function.dimension
[docs] def get_var_lower(self): return self._function.var_lower
[docs] def get_var_upper(self): return self._function.var_upper
[docs] def get_var_type(self): return self._function.var_type
[docs] def evaluate(self, point): return self._function.evaluate(point)
[docs] def evaluate_noisy(self, point): raise NotImplementedError('evaluate_noisy() not implemented')
[docs] def has_evaluate_noisy(self): return False
# -- end class
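A minimal usage sketch for TestBlackBox (assuming the module is importable as rbfopt.rbfopt_test_functions): the constructor lower-cases the given name, looks up the corresponding class above, and exposes it through the RbfoptBlackBox interface, so the resulting object can be handed directly to RBFOpt.

from rbfopt.rbfopt_test_functions import TestBlackBox, hartman3

bb = TestBlackBox('hartman3')                  # name is lower-cased before lookup
print(bb.get_dimension())                      # 3
print(bb.get_var_lower(), bb.get_var_upper())  # [0 0 0] [1 1 1]
print(bb.evaluate(hartman3.optimum_point))     # approximately -3.8626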
[docs]class TestNoisyBlackBox(RbfoptBlackBox): """A noisy black-box constructed from a given black-box function. Parameters ---------- blackbox : `RbfoptBlackBox` The black box function to which noise is added. max_rel_error: float Maximum relative error. max_abs_error: float Maximum absolute error. """ def __init__(self, blackbox, max_rel_error = 0.1, max_abs_error = 0.1): """Constructor. """ assert(max_rel_error >= 0.0) assert(max_abs_error >= 0.0) try: # Get the original function if it is one from this module self._function = getattr(blackbox, '_function') except AttributeError: pass self._bb = blackbox self._max_rel_error = max_rel_error self._max_abs_error = max_abs_error
[docs] def get_dimension(self): return self._bb.get_dimension()
[docs] def get_var_lower(self): return self._bb.get_var_lower()
[docs] def get_var_upper(self): return self._bb.get_var_upper()
[docs] def get_var_type(self): return self._bb.get_var_type()
[docs] def evaluate(self, point): return self._bb.evaluate(point)
[docs] def evaluate_noisy(self, point): value = self._bb.evaluate(point) rel_noise = np.random.uniform(-self._max_rel_error, self._max_rel_error) abs_noise = np.random.uniform(-self._max_abs_error, self._max_abs_error) return np.array([value + rel_noise*abs(value) + abs_noise, - abs(rel_noise*abs(value) + abs_noise), + abs(rel_noise*abs(value) + abs_noise)])
[docs] def has_evaluate_noisy(self): return True
# -- end class
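A sketch of how the noisy wrapper composes with the class above: evaluate() passes through to the wrapped black box, while evaluate_noisy() returns a three-element array holding the perturbed value followed by a negative and a positive bound on the error that was added.

import numpy as np
from rbfopt.rbfopt_test_functions import TestBlackBox, TestNoisyBlackBox

noisy_bb = TestNoisyBlackBox(TestBlackBox('branin'),
                             max_rel_error=0.1, max_abs_error=0.1)
point = np.array([2.5, 7.5])
print(noisy_bb.evaluate(point))         # exact Branin value at the point
print(noisy_bb.evaluate_noisy(point))   # [noisy value, -error bound, +error bound]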
[docs]class TestEnlargedBlackBox(RbfoptBlackBox): """A black-box constructed by increasing the size of a test function. Construct a black box function from a given function, increasing its dimension by a given factor. The new function is put together from several independent copies of the original function, plus a coupling term. If the dimension multiplier is `d` and the original function has dimension `n`, the new function has dimension `n*d` and is computed as: .. math:: \sum_{j=1}^{d} a_j f(x_{(j-1)n+1},\dots,x_{jn}) + 0.4 f(g_1(x),\dots,g_n(x)), where `a_j` are random weights that add up to 0.6, and `g_1` through `g_n` are linear functions of a random subset of the variables. These linear functions are appropriately scaled and clipped so that we do not exceed the original function bounds. The optimum of the new function stays the same. Finally, all variables are randomly permuted. Parameters ---------- name : string The name of the function to be implemented. dimension_multiplier : int Dimension multiplier """ def __init__(self, name, dimension_multiplier=1): """Constructor. """ assert(dimension_multiplier>=1) try: thismodule = sys.modules[__name__] self._function = getattr(thismodule, name.lower()) except AttributeError: raise ValueError('Function ' + name + ' not implemented') dim = self._function.dimension perm = np.random.permutation(dim * dimension_multiplier) bounds = [] mult_factor = [] shift = [] # For the copy of the function coupling all variables, # pick dimension_multiplier random variables to add together coupling = np.reshape(np.random.permutation(dim*dimension_multiplier), (dim, dimension_multiplier)) for i in range(dim): # The bounds of the sum are just the sum of the lower # and upper bounds of the component variables lb = sum(self._function.var_lower[perm[val] % dim] for val in coupling[i]) ub = sum(self._function.var_upper[perm[val] % dim] for val in coupling[i]) bounds.append([lb, ub]) # The coefficients are computed so that the optimum # stays the same shifted_opt = sum(self._function.optimum_point[perm[val] % dim] for val in coupling[i]) # Check the position of the optimum in the interval ratio = (shifted_opt - lb)/(ub - lb) orig_ratio = ((self._function.optimum_point[i] - self._function.var_lower[i]) / (self._function.var_upper[i] - self._function.var_lower[i])) # The multiplication factor should bring the # transformed optimum to the original optimum if (ratio != 0.0 and orig_ratio != 0.0): mult_factor.append(orig_ratio / ratio) shift.append(0) elif (orig_ratio == 0.0): # The true optimum is at the lower bound. We have to # ensure the transformed point is mapped to it. The # correct ratio would be zero, but to let the point # vary, we change the transformed bound instead. The # "max" in the bound is to prevent errors in case the # shifted optimum is at the upper bound. bounds[-1] = [shifted_opt, max(ub, shifted_opt+1)] mult_factor.append(1.0) shift.append(0) else: # The transformed point is at the lower bound. Ensure # it can reach the true optimum. 
mult_factor.append(1.0) shift.append(self._function.optimum_point[i] - self._function.var_lower[i]) # Compute weight of each copy of the function int_weights = np.random.randint(1, 10, dimension_multiplier) weight = np.array([0.6*val/sum(int_weights) for val in int_weights] + [0.4]) # Store data necessary for function evaluation self.coupling = coupling self.extra_bounds = np.array(bounds) self.mult_factor = np.array(mult_factor) self.shift = np.array(shift) self.weight = weight self.permutation = perm self.dimension = self._function.dimension self.dimension_multiplier = dimension_multiplier # Compute bounds and variable types self.var_lower = np.array( [self._function.var_lower[perm[i] % dim] for i in range(dim*dimension_multiplier)]) self.var_upper = np.array( [self._function.var_upper[perm[i] % dim] for i in range(dim*dimension_multiplier)]) self.var_type = np.array( [self._function.var_type[perm[i] % dim] for i in range(dim*dimension_multiplier)]) self.optimum_point = np.array( [self._function.optimum_point[perm[i] % dim] for i in range(dim*dimension_multiplier)]) self.optimum_value = self._function.optimum_value
[docs] def get_dimension(self): return self._function.dimension * self.dimension_multiplier
[docs] def get_var_lower(self): return self.var_lower
[docs] def get_var_upper(self): return self.var_upper
[docs] def get_var_type(self): return self.var_type
[docs] def evaluate(self, point): assert(len(point)==self.dimension*self.dimension_multiplier) # First evaluate each copy of the function on individual variables value = 0.0 for i in range(self.dimension_multiplier): subpoint = np.array([point[np.where(self.permutation == j)[0][0]] for j in range(i*self.dimension, (i+1)*self.dimension)]) value += self.weight[i]*self._function.evaluate(subpoint) # Add the coupling term subpoint = np.zeros(self.dimension) for i in range(self.dimension): subpoint[i] = np.sum(point[self.coupling[i]]) subpoint = (self._function.var_lower + self.shift + self.mult_factor * (subpoint - self.extra_bounds[:, 0]) / (self.extra_bounds[:, 1] - self.extra_bounds[:, 0]) * (self._function.var_upper - self._function.var_lower)) subpoint = np.clip(subpoint, a_min=self._function.var_lower, a_max=self._function.var_upper) for (i, vart) in enumerate(self._function.var_type): if (vart == 'C'): locations = np.where( np.mod(self.permutation, self.dimension) == i)[0] subpoint[i] = np.argmax( np.bincount(point[locations].astype(np.int_))) if (vart == 'I'): subpoint[i] = np.round(subpoint[i]) value += self.weight[-1] * self._function.evaluate(subpoint) return value
[docs] def evaluate_noisy(self, point): raise NotImplementedError('evaluate_noisy() not implemented')
[docs] def has_evaluate_noisy(self): return False
# -- end class
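A usage sketch for the enlarged wrapper (the construction is randomized, so the permutation and coupling change from run to run unless the NumPy seed is fixed, while the optimum value is preserved by construction):

import numpy as np
from rbfopt.rbfopt_test_functions import TestEnlargedBlackBox

np.random.seed(0)                       # make the random construction repeatable
bb = TestEnlargedBlackBox('hartman3', dimension_multiplier=2)
print(bb.get_dimension())               # 6 == 3 * 2
print(bb.evaluate(bb.optimum_point))    # close to bb.optimum_value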