Multi-dimensional state estimation in adversarial environment


Yilin Mo and Richard M. Murray

Chinese Control Conference, 2015

Link to the paper

Link to the presentation

If you want to leave any comments, you can annotate the PDF; I will try to be responsive. You can also annotate this page or leave comments below.

Abstract

We consider the estimation of a vector state based on \(m\) measurements that can be potentially manipulated by an adversary. The attacker is assumed to have limited resources and can only manipulate up to \(l\) of the \(m\) measurements. However, it can manipulate the compromised measurements arbitrarily. The problem is formulated as a minimax optimization, where one seeks to construct an optimal estimator that minimizes the "worst-case" error against all possible manipulations by the attacker and all possible sensor noises. We show that if the system is not observable after removing \(2l\) sensors, then the worst-case error is infinite, regardless of the estimation strategy. If the system remains observable after removing an arbitrary set of \(2l\) sensors, we prove that the optimal state estimate can be computed by solving a semidefinite programming problem. A numerical example is provided to illustrate the effectiveness of the proposed state estimator.
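
In the notation of the simulation below, a minimal sketch of the setup is (the exact norms and error metric are specified in the paper):

\[ y = Hx + Gw + a, \qquad \|w\| \le \delta, \qquad a \text{ has at most } l \text{ nonzero entries}, \]

and the estimator is chosen to minimize the worst-case estimation error over all admissible \(x\), \(w\), and \(a\):

\[ \hat{x}^{*} = \arg\min_{\hat{x}(\cdot)} \; \sup_{x,\ \|w\| \le \delta,\ \|a\|_0 \le l} \; \|\hat{x}(y) - x\|. \]

Here \(\delta\) and \(l\) correspond to delta and l in the code below.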

Corrections [1]

  • The \(r^2\) in (19) should be \(-r^2\).
  • The \(\varphi\) in (20) should be \(-\varphi\).

Simulation Code

The code is written in Python 3.4 with the following dependencies:

  • numpy 1.8.2
  • scipy 0.13.3
  • cvxopt 1.1.7
  • cvxpy 0.2.24
  • matplotlib 1.3.1
  • seaborn 0.6.0
import numpy as np
from numpy.random import rand
from numpy import cos, sin, sqrt, matrix, eye, zeros
from numpy.linalg import inv, svd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns #for the sake of appearance
from matplotlib.patches import Ellipse
from itertools import combinations
import cvxpy as cvx
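
# Overview: for every subset of m - l sensors, compute the set of states
# consistent with those measurements and the noise bound (an ellipsoid),
# then solve the SDP of (20) to obtain the minimax estimate and compare it
# with the plain least-squares estimate of (21).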

# returns the boundary of an ellipse: (x-c)'Q(x-c) == 1
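# (Q = U diag(s) U' via the SVD; the boundary is traced as c + U diag(s)^{-1/2} [cos t, sin t]')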
def convert2Ellipse(c, Q): 
    if c.shape == (2,1) and Q.shape == (2,2):
        Q = (Q + Q.T)/2
        U, s, V = svd(Q)
        theta = np.linspace(0, 2 * np.pi, 360)
        x = U[0,0] * cos(theta) / sqrt(s[0]) + U[0,1] * sin(theta) / sqrt(s[1]) + c[0,0]
        y = U[1,0] * cos(theta) / sqrt(s[0]) + U[1,1] * sin(theta) / sqrt(s[1]) + c[1,0]
        return x, y
    else:
        return None

#static parameters
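# n: state dimension; m: number of sensors;
# l: maximum number of sensors the attacker can compromise; delta: bound on ||w||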
n = 2
m = 4
l = 1
delta = 1.
H = matrix([[1, 0],
            [0, 1],
            [1, 1],
            [1, -1]], 'f')
G = matrix(eye(m))
#generate x, w, a, y randomly
x = matrix(rand(n, 1))
w = matrix(rand(m, 1))
while np.linalg.norm(w) >= delta: # regenerate until the noise bound ||w|| < delta holds
    w = matrix(rand(m, 1))

a = matrix([0, 0, 0, 1.5]).T
y = H * x + G * w + a

# For this simulation, however, we use the worst-case y
y = matrix('-0.851;2.753;0.5257;0')

OmegaList = []
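# For each subset I of m - l sensors (i.e., treating the remaining l sensors
# as potentially compromised), compute the least-squares estimate hatxI and,
# when the measurements yI are consistent with the noise bound, the ellipsoid
# of states that could have produced them.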
for indexset in combinations(range(m), m - l): 
    HI = H[indexset, :]
    GI = G[indexset, :]
    yI = y[indexset, :]
    XiI = GI * GI.T
    invXiI = inv(XiI)
    PI = inv(HI.T * invXiI * HI)
    KI = PI * HI.T * invXiI
    tmp = eye(m-l) - HI * KI
    UI = tmp.T * invXiI * tmp
    hatxI = KI * yI
    epsilonI = yI.T * UI * yI
    if delta**2 >= epsilonI: #if the ellipsoid exists
        # Generate the Omega of Theorem 4
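        # Omega encodes, in homogeneous coordinates, the ellipsoid
        # {x : (x - hatxI)' inv(PI) (x - hatxI) <= delta^2 - epsilonI}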
        Omega = np.bmat([[inv(PI), -1 * inv(PI) * hatxI, zeros((n, n))],
                         [-1 * hatxI.T * inv(PI), hatxI.T * inv(PI) * hatxI + epsilonI - delta**2, zeros((1, n))],
                         [zeros((n, 2 * n + 1))]])
        OmegaList.append(Omega)
        #Draw the corresponding ellipse
        Q = inv(PI)/(delta ** 2 - epsilonI)
        ex, ey = convert2Ellipse(hatxI, Q)
        plt.fill(ex, ey)
        plt.plot(ex, ey)

# convex optimization problem described in (20)
tau = cvx.Variable(len(OmegaList))
varphi = cvx.Variable()
hatx = cvx.Variable(n)
# X corresponds to the matrix 
# |  I      -hatx     0  |
# |-hatx'  -varphi  hatx'|
# |  0       hatx    -I  |
X = cvx.Symmetric(2*n+1) 
constraints = [varphi >= 0,
               tau >= 0,
               X[:n,:n] == eye(n),
               X[:n,n+1:] == zeros((n, n)),
               X[:n, n] == -hatx,
               X[n,n] == -varphi,
               X[n+1:, n] == hatx,
               X[n+1:, n+1:] == -1 * eye(n),]
obj = cvx.Minimize(varphi)
for i in range(len(OmegaList)):
    constraints.append(tau[i] * OmegaList[i] - X >> 0)
prob = cvx.Problem(obj, constraints)
prob.solve()
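# hatx.value is the minimax estimate; the dashed red circle plotted below has
# radius sqrt(varphi.value), the optimal worst-case estimation error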

#plot the corresponding ellipse and its center
ex, ey = convert2Ellipse(hatx.value, eye(n)/varphi.value)
plt.plot(ex, ey, 'r--')
plt.plot(hatx.value[0], hatx.value[1], 'ro')

# Compute the state estimation given by (21)
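# (the standard weighted least-squares estimate using all m measurements,
# which ignores the possibility of an attack)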
hatx = inv(H.T * inv(G*G.T) * H) * H.T * inv(G*G.T) * y

# Find the maximum error, i.e., the minimum radius circle
tau = cvx.Variable(len(OmegaList))
varphi = cvx.Variable()
X = cvx.Symmetric(2*n+1)
constraints = [varphi >= 0,
               tau >= 0,
               X[:n,:n] == eye(n),
               X[:n,n+1:] == zeros((n, n)),
               X[:n, n] == -hatx,
               X[n,n] == -varphi,
               X[n+1:, n] == hatx,
               X[n+1:, n+1:] == -1 * eye(n),]
obj = cvx.Minimize(varphi)
for i in range(len(OmegaList)):
    constraints.append(tau[i] * OmegaList[i] - X >> 0)
prob = cvx.Problem(obj, constraints)
prob.solve()
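# sqrt(varphi.value) is now the worst-case error of the fixed least-squares
# estimate above, plotted below as the dashed black circle for comparison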

#plot the corresponding ellipse and its center
ex, ey = convert2Ellipse(hatx, eye(n)/varphi.value)
plt.plot(ex, ey, 'k--')
plt.plot(hatx[0], hatx[1], 'ks')

plt.axis('equal')
plt.tight_layout()
plt.savefig('../../../public/ccc-15.png')
return '../../../public/ccc-15.png' # return the filename to org-mode

Footnotes:

[1] The manuscript on this web page has been corrected; the submitted manuscript still contains these errors.