Skip to content

Instantly share code, notes, and snippets.

@rafaelrojasmiliani
Last active November 7, 2024 13:05
Show Gist options
  • Save rafaelrojasmiliani/c0a6706266b319370a6ba01b1df5c4f0 to your computer and use it in GitHub Desktop.
Save rafaelrojasmiliani/c0a6706266b319370a6ba01b1df5c4f0 to your computer and use it in GitHub Desktop.
Stable-by-Design Kinematic Control Based on Optimization: Goncalves, Adorno and Fraisse
class cGoncalvesKinematicControl(object):
    r'''Implementation of
    V. M. Goncalves, B. V. Adorno, A. Crosnier and P. Fraisse,
    "Stable-by-Design Kinematic Control Based on Optimization," in IEEE
    Transactions on Robotics, vol. 36, no. 3, pp. 644-656, June 2020, doi:
    10.1109/TRO.2019.2963665.
    with
    - Lyapunov function as the squared l2 norm of the error
        V = \|error\|_2^2
    - Control constraints as joint velocity limits
    - Function Psi chosen in order to design the closed-loop behaviour
    '''

    def __init__(self, _njoints, _kappa, _eta, _joint_vel_lim=10.0):
        '''
        _njoints: int
            number of joints (dimension of the control input)
        _kappa: float
            gain inside the tanh of Psi
        _eta: float
            scaling gain of Psi
        _joint_vel_lim: float
            symmetric joint-velocity bound, i.e. -lim <= u_i <= lim
        '''
        self.n_ = _njoints
        self.kappa_ = _kappa
        self.eta_ = _eta
        self.joint_vel_lim_ = _joint_vel_lim
        n = self.n_
        # Inequality constraints Aun*u <= bun encoding the box
        # -joint_vel_lim <= u_i <= joint_vel_lim.
        Aun = np.vstack([np.eye(n), -np.eye(n)])
        bun = np.array(2 * n * [self.joint_vel_lim_])
        # Equality-constraint row (Lyapunov decrease condition); it is
        # overwritten at every call of get_control, zeros are placeholders.
        Aeq = np.array(n * [0.0])
        beq = np.array([0.0])
        self.un_const_matrix_ = matrix(Aun)
        self.un_const_vector_ = matrix(bun)
        self.eq_const_matrix_ = matrix(Aeq.reshape(1, -1))
        self.eq_const_vector_ = matrix(beq)
        # Minimum-norm objective 0.5*u^T Q u + p^T u with Q = I, p = 0.
        Q = np.eye(n)
        p = np.array(n * [0.0])
        self.quadratic_cost_matrix_ = matrix(Q)
        self.quadratic_cost_vector_ = matrix(p)
        self.rho_ = 0.5

    def psi(self, _lyap, _lyap_grad):
        '''Closed-loop design function Psi = eta * V * tanh(kappa*||dV/dq||).

        _lyap: float,
            value of the Lyapunov function
        _lyap_grad: numpy.array
            gradient of the Lyapunov function w.r.t. the joint positions
            (spatial gradient)
        '''
        eta = self.eta_
        kappa = self.kappa_
        V = _lyap
        dVdq_norm = np.linalg.norm(_lyap_grad)
        result = eta * V * np.tanh(kappa * dVdq_norm)
        return result

    def get_rho_psi(self, _lyap, _lyap_grad, _lyap_dt):
        '''Get the value of rho that makes the problem feasible.

        _lyap: float
            value of the Lyapunov function
        _lyap_grad: numpy.array
            spatial gradient of the Lyapunov function
        _lyap_dt: float
            partial time derivative of the Lyapunov function
        '''
        dVdq = _lyap_grad
        dVdt = _lyap_dt
        Aun = self.un_const_matrix_
        bun = self.un_const_vector_
        # Minimize dVdq . v over the velocity box to find the best
        # achievable Lyapunov decrease rate.
        result = linear_program(matrix(dVdq), Aun, bun)
        assert result['status'] == 'optimal', \
            'ERROR ON GONCALVES: bad problem formulation'
        v_star = result['x']
        # NOTE(review): Psi == 0 when V == 0 (error is zero), which would
        # make rho_star inf/nan below — callers reach this path only after
        # the QP failed, presumably with V > 0; verify against caller.
        Psi = self.psi(_lyap, _lyap_grad)
        rho_star = -(dVdq.dot(v_star) + dVdt) / Psi
        rho = min(1.0, rho_star)
        assert rho >= 0.0, 'ERROR ON GONCALVES: bad problem formulation'
        return rho, Psi

    def get_control(self, _error, _qd_d):
        '''Compute the joint-velocity control input.

        _error: numpy.array
            tracking error
        _qd_d: numpy.array
            desired velocity
        Returns a list with the control input (joint velocities).
        '''
        # Lyapunov function V = ||error||^2 and its derivatives.
        V = np.linalg.norm(_error) ** 2
        dVdq = -2 * _error
        dVdt = 2 * _error.dot(_qd_d)
        Psi = self.psi(V, dVdq)
        rho = self.rho_
        Q = self.quadratic_cost_matrix_
        p = self.quadratic_cost_vector_
        Aeq = self.eq_const_matrix_
        beq = self.eq_const_vector_
        Aun = self.un_const_matrix_
        bun = self.un_const_vector_
        # Equality constraint dVdq . u = -dVdt - rho*Psi enforces the
        # prescribed Lyapunov decrease.
        Aeq[:] = dVdq
        beq[0] = -dVdt - rho * Psi
        try:
            result = quadratic_program(Q, p, Aun, bun, Aeq, beq)
            u = list(result['x'])
        except Exception:
            # QP infeasible with the stored rho: recompute the largest
            # feasible rho via the LP relaxation and retry.
            rho, Psi = self.get_rho_psi(V, dVdq, dVdt)
            self.rho_ = rho
            beq[0] = -dVdt - rho * Psi
            try:
                result = quadratic_program(Q, p, Aun, bun, Aeq, beq)
                u = list(result['x'])
            except Exception:
                # Last-resort fallback: simple proportional tracking law.
                u = (-0.5 * _error + _qd_d).tolist()
        return u

    def set_parameters(self, _eta, _kappa, _umax):
        '''Update the controller gains and the joint-velocity limit.

        _eta: float, scaling gain of Psi
        _kappa: float, gain inside the tanh of Psi
        _umax: float, new symmetric joint-velocity bound
        '''
        self.eta_ = _eta
        self.kappa_ = _kappa
        self.joint_vel_lim_ = _umax
        n = self.n_
        # Only the inequality right-hand side depends on the limit; the
        # constraint matrix itself is unchanged.
        bun = np.array(2 * n * [self.joint_vel_lim_])
        self.un_const_vector_ = matrix(bun)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment