Try something like this:

Edit: added an example function that uses the linear regression result to estimate the output.

import numpy as np
data = np.array(
    [[-0.042780748663101636, -0.0040771571786609945, -0.00506567946276074],
     [0.042780748663101636, -0.0044771571786609945, -0.10506567946276074],
     [0.542780748663101636, -0.005771571786609945, 0.30506567946276074],
     [-0.342780748663101636, -0.0304077157178660995, 0.90506567946276074]])

coefficient = data[:, 0:2]   # the two independent variables, one row per observation
dependent = data[:, -1]      # the observed outputs

# least-squares solution of coefficient * x = dependent
x, residuals, rank, s = np.linalg.lstsq(coefficient, dependent)

def f(x, u, v):
    # estimate the output for a single (u, v) pair using the fitted coefficients
    return u * x[0] + v * x[1]

for datum in data:
    print(f(x, *datum[0:2]))
which gives

>>> x
array([ 0.16991146, -30.18923739])
>>> residuals
array([ 0.07941146])
>>> rank
2
>>> s
array([ 0.64490113, 0.02944663])
and the function built from your coefficients prints

0.115817326583
0.142430900298
0.266464019171
0.859743371665
More information can be found in the documentation I posted as a comment.
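As an aside, the per-row loop above just evaluates the fitted plane for each observation; the same estimates can be computed in one vectorized step. A minimal sketch, assuming the coefficient and x arrays defined above:

# matrix-vector product gives the same four estimates as the loop above
predictions = coefficient.dot(x)
print(predictions)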
Edit 2: Fitting the data to an arbitrary model.

Edit 3: Made my model an easier-to-understand function.

Edit 4: Made the code easier to read, and changed it to a quadratic fit model; you should be able to read this code and see how to make it minimize whatever residual you want.
Contrived example:

import numpy as np
from scipy.optimize import leastsq

data = np.array(
    [[-0.042780748663101636, -0.0040771571786609945, -0.00506567946276074],
     [0.042780748663101636, -0.0044771571786609945, -0.10506567946276074],
     [0.542780748663101636, -0.005771571786609945, 0.30506567946276074],
     [-0.342780748663101636, -0.0304077157178660995, 0.90506567946276074]])

coefficient = data[:, 0:2]
dependent = data[:, -1]

def model(p, x):
    # quadratic in the first variable, linear in the second
    a, b, c = p
    u = x[:, 0]
    v = x[:, 1]
    return a * u**2 + b * v + c

def residuals(p, y, x):
    # difference between the observed values and the model predictions
    return y - model(p, x)

p0 = np.array([2, 3, 4])  # some initial guess
p = leastsq(residuals, p0, args=(dependent, coefficient))[0]

def f(p, x):
    # evaluate a linear expression in the fitted parameters for one data row
    return p[0] * x[0] + p[1] * x[1] + p[2]

for x in coefficient:
    print(f(p, x))
which gives

-0.108798280153
-0.00470479385807
0.570237823475
0.413016072653
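Note that f above prints a linear expression in the fitted parameters; the quadratic model that leastsq actually minimized can be evaluated for all rows at once by reusing model directly. A minimal sketch that builds on the definitions above (its printed values will differ from the linear evaluation shown):

# predictions from the fitted quadratic model, one value per observation
print(model(p, coefficient))

If you would rather not write the residuals function by hand, scipy.optimize.curve_fit offers a higher-level interface to the same kind of least-squares fit.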