Participer au site avec un Tip
Rechercher
 

Améliorations / Corrections

Vous avez des améliorations (ou des corrections) à proposer pour ce document : je vous remercie par avance de m'en faire part, cela m'aide à améliorer le site.

Emplacement :

Description des améliorations :

Vous êtes un professionnel et vous avez besoin d'une formation ? Sensibilisation à
l'Intelligence Artificielle
Voir le programme détaillé

Contenu du module « scipy.optimize »

Liste des classes du module scipy.optimize

Nom de la classe Description
BFGS Broyden-Fletcher-Goldfarb-Shanno (BFGS) Hessian update strategy. [extrait de BFGS.__doc__]
Bounds Bounds constraint on the variables. [extrait de Bounds.__doc__]
BroydenFirst
HessianUpdateStrategy Interface for implementing Hessian update strategies. [extrait de HessianUpdateStrategy.__doc__]
InverseJacobian
KrylovJacobian
LbfgsInvHessProduct Linear operator for the L-BFGS approximate inverse Hessian. [extrait de LbfgsInvHessProduct.__doc__]
LinearConstraint Linear constraint on the variables. [extrait de LinearConstraint.__doc__]
NonlinearConstraint Nonlinear constraint on the variables. [extrait de NonlinearConstraint.__doc__]
OptimizeResult
RootResults Represents the root finding result. [extrait de RootResults.__doc__]
SR1 Symmetric-rank-1 Hessian update strategy. [extrait de SR1.__doc__]

Liste des exceptions du module scipy.optimize

Nom de la classe d'exception Description
NoConvergence Exception raised when nonlinear solver fails to converge within the specified [extrait de NoConvergence.__doc__]
OptimizeWarning

Liste des fonctions du module scipy.optimize

Signature de la fonction Description
anderson(F, xin, iter=None, alpha=None, w0=0.01, M=5, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
approx_fprime(xk, f, epsilon=np.float64(1.4901161193847656e-08), *args) Finite difference approximation of the derivatives of a [extrait de approx_fprime.__doc__]
basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs=None, take_step=None, accept_test=None, callback=None, interval=50, disp=False, niter_success=None, rng=None, *, target_accept_rate=0.5, stepwise_factor=0.9)
bisect(f, a, b, args=(), xtol=2e-12, rtol=np.float64(8.881784197001252e-16), maxiter=100, full_output=False, disp=True)
bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000)
brent(func, args=(), brack=None, tol=1.48e-08, full_output=0, maxiter=500)
brenth(f, a, b, args=(), xtol=2e-12, rtol=np.float64(8.881784197001252e-16), maxiter=100, full_output=False, disp=True) Find a root of a function in a bracketing interval using Brent's [extrait de brenth.__doc__]
brentq(f, a, b, args=(), xtol=2e-12, rtol=np.float64(8.881784197001252e-16), maxiter=100, full_output=False, disp=True)
broyden1(F, xin, iter=None, alpha=None, reduction_method='restart', max_rank=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
broyden2(F, xin, iter=None, alpha=None, reduction_method='restart', max_rank=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
brute(func, ranges, args=(), Ns=20, full_output=0, finish=<function fmin at 0x0000020DFE4D0040>, disp=False, workers=1) Minimize a function over a given range by brute force. [extrait de brute.__doc__]
check_grad(func, grad, x0, *args, epsilon=np.float64(1.4901161193847656e-08), direction='all', rng=None)
curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False, check_finite=None, bounds=(-inf, inf), method=None, jac=None, *, full_output=False, nan_policy=None, **kwargs)
diagbroyden(F, xin, iter=None, alpha=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
differential_evolution(func, bounds, args=(), strategy='best1bin', maxiter=1000, popsize=15, tol=0.01, mutation=(0.5, 1), recombination=0.7, rng=None, callback=None, disp=False, polish=True, init='latinhypercube', atol=0, updating='immediate', workers=1, constraints=(), x0=None, *, integrality=None, vectorized=False)
direct(func: Callable[[numpy.ndarray[tuple[int], numpy.dtype[numpy.float64]]], float | numpy.floating[Any] | numpy.integer[Any] | numpy.bool], bounds: Union[Iterable, scipy.optimize._constraints.Bounds], *, args: tuple = (), eps: float = 0.0001, maxfun: int | None = None, maxiter: int = 1000, locally_biased: bool = True, f_min: float = -inf, f_min_rtol: float = 0.0001, vol_tol: float = 1e-16, len_tol: float = 1e-06, callback: Optional[Callable[[numpy.ndarray[tuple[int], numpy.dtype[numpy.float64]]], object]] = None) -> scipy.optimize._optimize.OptimizeResult
dual_annealing(func, bounds, args=(), maxiter=1000, minimizer_kwargs=None, initial_temp=5230.0, restart_temp_ratio=2e-05, visit=2.62, accept=-5.0, maxfun=10000000.0, rng=None, no_local_search=False, callback=None, x0=None)
excitingmixing(F, xin, iter=None, alpha=None, alphamax=1.0, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
fixed_point(func, x0, args=(), xtol=1e-08, maxiter=500, method='del2')
fmin(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, initial_simplex=None)
fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-05, norm=inf, epsilon=np.float64(1.4901161193847656e-08), maxiter=None, full_output=0, disp=1, retall=0, callback=None, xrtol=0, c1=0.0001, c2=0.9, hess_inv0=None)
fmin_cg(f, x0, fprime=None, args=(), gtol=1e-05, norm=inf, epsilon=np.float64(1.4901161193847656e-08), maxiter=None, full_output=0, disp=1, retall=0, callback=None, c1=0.0001, c2=0.4)
fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=0.0001, maxfun=1000, disp=None, catol=0.0002, *, callback=None)
fmin_l_bfgs_b(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=10000000.0, pgtol=1e-05, epsilon=1e-08, iprint=-1, maxfun=15000, maxiter=15000, disp=None, callback=None, maxls=20)
fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-05, epsilon=np.float64(1.4901161193847656e-08), maxiter=None, full_output=0, disp=1, retall=0, callback=None, c1=0.0001, c2=0.9)
fmin_powell(func, x0, args=(), xtol=0.0001, ftol=0.0001, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None)
fmin_slsqp(func, x0, eqcons=(), f_eqcons=None, ieqcons=(), f_ieqcons=None, bounds=(), fprime=None, fprime_eqcons=None, fprime_ieqcons=None, args=(), iter=100, acc=1e-06, iprint=1, disp=None, full_output=0, epsilon=np.float64(1.4901161193847656e-08), callback=None)
fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0, bounds=None, epsilon=1e-08, scale=None, offset=None, messages=15, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1, rescale=-1, disp=None, callback=None)
fminbound(func, x1, x2, args=(), xtol=1e-05, maxfun=500, full_output=0, disp=1) Bounded minimization for scalar functions. [extrait de fminbound.__doc__]
fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, epsfcn=None, factor=100, diag=None)
golden(func, args=(), brack=None, tol=np.float64(1.4901161193847656e-08), full_output=0, maxiter=5000)
isotonic_regression(y: 'npt.ArrayLike', *, weights: 'npt.ArrayLike | None' = None, increasing: bool = True) -> scipy.optimize._optimize.OptimizeResult Nonparametric isotonic regression. [extrait de isotonic_regression.__doc__]
least_squares(fun, x0, jac='2-point', bounds=(-inf, inf), method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options=None, jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs=None) Solve a nonlinear least-squares problem with bounds on the variables. [extrait de least_squares.__doc__]
leastsq(func, x0, args=(), Dfun=None, full_output=False, col_deriv=False, ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None)
linear_sum_assignment Solve the linear sum assignment problem. [extrait de linear_sum_assignment.__doc__]
linearmixing(F, xin, iter=None, alpha=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=(0, None), method='highs', callback=None, options=None, x0=None, integrality=None)
linprog_verbose_callback(res)
lsq_linear(A, b, bounds=(-inf, inf), method='trf', tol=1e-10, lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0, *, lsmr_maxiter=None) Solve a linear least-squares problem with bounds on the variables. [extrait de lsq_linear.__doc__]
milp(c, *, integrality=None, bounds=None, constraints=None, options=None)
minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None) Minimization of scalar function of one or more variables. [extrait de minimize.__doc__]
minimize_scalar(fun, bracket=None, bounds=None, args=(), method=None, tol=None, options=None) Local minimization of scalar function of one variable. [extrait de minimize_scalar.__doc__]
newton(func, x0, fprime=None, args=(), tol=1.48e-08, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True)
newton_krylov(F, xin, iter=None, rdiff=None, method='lgmres', inner_maxiter=20, inner_M=None, outer_k=10, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw)
nnls(A, b, maxiter=None, *, atol=None)
quadratic_assignment(A, B, method='faq', options=None)
ridder(f, a, b, args=(), xtol=2e-12, rtol=np.float64(8.881784197001252e-16), maxiter=100, full_output=False, disp=True)
root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None)
root_scalar(f, args=(), method=None, bracket=None, fprime=None, fprime2=None, x0=None, x1=None, xtol=None, rtol=None, maxiter=None, options=None)
rosen(x)
rosen_der(x)
rosen_hess(x)
rosen_hess_prod(x, p)
shgo(func, bounds, args=(), constraints=None, n=100, iters=1, callback=None, minimizer_kwargs=None, options=None, sampling_method='simplicial', *, workers=1)
show_options(solver=None, method=None, disp=True)
test(label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False, tests=None, parallel=None)
toms748(f, a, b, args=(), k=1, xtol=2e-12, rtol=np.float64(8.881784197001252e-16), maxiter=100, full_output=False, disp=True)

Liste des alias du module scipy.optimize

Nom de l'alias Définition ciblée
line_search line_search_wolfe2


Vous êtes un professionnel et vous avez besoin d'une formation ? Mise en œuvre d'IHM
avec Qt et PySide6
Voir le programme détaillé