python - optimisation with specific values to choose from in my bounds
Question
Below is the code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize

def objective(x):
    x1 = x[0]
    x2 = x[1]
    x3 = x[2]
    x4 = x[3]
    return x1*x4*(x1+x2+x3)+x3

def const1(x):
    return x[0]*x[1]*x[2]*x[3]-25

def const2(x):
    sum_sq = 40
    for i in range(4):
        sum_sq = sum_sq - x[i]**2
    return sum_sq

x0 = [1,5,5,1]
b = (1,5)
bnds = (b,b,b,b)
cons1 = {'type':'ineq','fun':const1}
cons2 = {'type':'eq','fun':const2}
cons = [cons1,cons2]
sol = minimize(objective,x0,method='SLSQP',bounds=bnds,constraints=cons)
print(sol)
I have defined my bounds here between 1 and 5 (b = (1,5)). But what if I wanted my variables to take only specific values, i.e. either 3, 4 or 5? That is, each of the 4 variables can have the value 3, 4 or 5. Is it possible?
Answer 1
Score: 0
This is an example of MINLP (mixed-integer nonlinear programming). To my knowledge, the only scipy optimization method that supports this is differential_evolution.
Your current sum_sq constraint will fail if you strictly enforce that it's equal to 40: with every variable restricted to {3, 4, 5}, each squared term is 9, 16 or 25, so the sum of squares can reach 36, 43, 50 and so on, but never exactly 40. That constraint needs to be loosened for the integer solution.
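A throwaway check of that claim, enumerating all 3^4 = 81 integer candidates (not part of the solution below):

from itertools import product

# Every sum of squares attainable on the {3, 4, 5} grid:
attainable = {sum(v*v for v in combo) for combo in product((3, 4, 5), repeat=4)}
print(sorted(attainable))  # [36, 43, 50, ...] -- 40 never appears
print(40 in attainable)    # False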
Provide your Jacobians, fix your incorrect bounds (you have two conflicting lower-bound descriptions, 1 and 3 respectively), fix your incorrect initial values so that they're compatible with your bounds, and it works fine:
import numpy as np
from scipy.optimize import check_grad, differential_evolution, minimize, NonlinearConstraint

def objective(x: np.ndarray) -> float | np.ndarray:
    x1, x2, x3, x4 = x
    return x1*x4*(x1 + x2 + x3) + x3

def jac_obj(x: np.ndarray) -> tuple[float | np.ndarray, ...]:
    x1, x2, x3, x4 = x
    return (
        x4*(x1 + x2 + x3) + x1*x4,
        x1*x4,
        x1*x4 + 1,
        x1*(x1 + x2 + x3),
    )

def const_min_prod(x: np.ndarray) -> float | np.ndarray:
    return x.prod(axis=0, keepdims=True)

def jac_min_prod(x: np.ndarray) -> tuple[float | np.ndarray, ...]:
    x1, x2, x3, x4 = x
    return (
        x2*x3*x4,
        x1   *x3*x4,
        x1*x2   *x4,
        x1*x2*x3,
    )

def const_sum_sq(x: np.ndarray) -> float | np.ndarray:
    return (x*x).sum(axis=0, keepdims=True)

def jac_sum_sq(x: np.ndarray) -> np.ndarray:
    return 2*x

x0 = np.array((
    3,
    np.sqrt(40 - 3*9),  # sumsq feasibility
    3, 3,
))
bounds = ((3, 5),) * x0.size

def constraints(eq_epsilon: float = 0) -> tuple[NonlinearConstraint, ...]:
    return (
        NonlinearConstraint(
            fun=const_min_prod, lb=25, ub=np.inf, jac=jac_min_prod,
        ),
        NonlinearConstraint(
            fun=const_sum_sq, lb=40 - eq_epsilon, ub=40 + eq_epsilon, jac=jac_sum_sq,
        ),
    )

assert check_grad(x0=x0, func=objective,      grad=jac_obj     ) < 1e-4
assert check_grad(x0=x0, func=const_min_prod, grad=jac_min_prod) < 1e-4
assert check_grad(x0=x0, func=const_sum_sq,   grad=jac_sum_sq  ) < 1e-4

def continuous_optimize():
    sol = minimize(
        fun=objective, method='SLSQP', jac=jac_obj,
        x0=x0, bounds=bounds, constraints=constraints(),
    )
    print('Continuous:')
    assert sol.success, sol.message
    print(sol, end='\n\n')

def discrete_optimize():
    sol = differential_evolution(
        func=objective, vectorized=True, integrality=True,
        x0=x0, bounds=bounds, constraints=constraints(eq_epsilon=3),
    )
    print('Discrete:')
    assert sol.success, sol.message
    print(sol)

if __name__ == '__main__':
    continuous_optimize()
    discrete_optimize()
Continuous:
 message: Optimization terminated successfully
 success: True
  status: 0
     fun: 89.44996147917591
       x: [ 3.000e+00  3.606e+00  3.000e+00  3.000e+00]
     nit: 1
     jac: [ 3.782e+01  9.000e+00  1.000e+01  2.882e+01]
    nfev: 1
    njev: 1

Discrete:
             message: Optimization terminated successfully.
             success: True
                 fun: 93.0
                   x: [ 3.000e+00  4.000e+00  3.000e+00  3.000e+00]
                 nit: 19
                nfev: 20
              constr: [array([ 0.000e+00]), array([ 0.000e+00])]
    constr_violation: 0.0
               maxcv: 0.0
This will need (a lot of) tuning of the various parameters of differential_evolution if and when your problem changes.
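For reference, these are the kind of knobs to turn. All of the parameters below are real differential_evolution arguments, but the values are purely illustrative, not tuned for this problem:

sol = differential_evolution(
    func=objective, vectorized=True, integrality=True,
    x0=x0, bounds=bounds, constraints=constraints(eq_epsilon=3),
    strategy='best1bin',   # differential-evolution mutation strategy
    popsize=30,            # larger population explores the integer grid more thoroughly
    mutation=(0.3, 0.9),   # dithered mutation factor
    recombination=0.8,     # crossover probability
    maxiter=2000,          # allow more generations before giving up
    tol=1e-6,              # tighter convergence tolerance
    seed=42,               # reproducible runs
)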
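Alternatively, since each of the four variables can only take one of three values, the whole search space is just 3^4 = 81 points, so exhaustive enumeration is trivially cheap. A minimal sketch reusing objective from the script above (the equality constraint still has to be relaxed, for the reason shown earlier):

from itertools import product

import numpy as np

feasible = (
    combo for combo in product((3, 4, 5), repeat=4)
    if np.prod(combo) >= 25                     # product >= 25 constraint
    and abs(sum(v*v for v in combo) - 40) <= 3  # relaxed sum-of-squares constraint
)
best = min(feasible, key=objective)
print(best, objective(best))  # (3, 4, 3, 3) 93 -- matches the differential_evolution result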