pixhawk position_estimator_inav.cpp: approach and data flow

Preface:

This post mainly works through the altitude estimation algorithm in pixhawk's position_estimator_inav.cpp and then extends the same reading to the other sensors. Much of what follows simply records the processing flow; the physical meaning behind some of the steps is still not entirely clear to me, and I would welcome corrections from more experienced readers.

Overview:

The core idea of the whole algorithm is to integrate acceleration expressed in the earth (NED) frame to obtain velocity and position. Two kinds of correction make these estimates usable. First, each sensor provides correction terms from which an accelerometer bias is estimated, and that bias corrects the acceleration. Second, the same correction terms correct the position (and velocity) estimates directly. The usable velocity has therefore been corrected through the acceleration, while the usable position has been corrected through both the acceleration and the position corrections. The acceleration itself is produced by subtracting the estimated bias from the body-frame measurement and rotating the result into the earth frame; all of the position corrections are applied through the inertial_filter_correct() function.

In other words, the role of each sensor here is to compute a correction term that is used to correct the accelerometer bias.
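A minimal one-axis sketch of this predict/correct/bias-update cycle is shown below. All constants, the fake sensor signal and the reference "measurement" are illustrative only, not taken from the real module, which does this per NED axis with several sensors and delay compensation:

#include <stdio.h>

int main()
{
	float est[2] = { 0.0f, 0.0f };   // est[0] = position, est[1] = velocity
	float acc_bias = 0.0f;           // estimated accelerometer bias
	const float dt = 0.02f;          // illustrative 50 Hz update
	const float w = 0.5f;            // illustrative correction weight
	const float w_acc_bias = 0.05f;  // illustrative bias-learning weight

	for (int k = 0; k < 500; k++) {
		float t = k * dt;
		float acc_meas = 0.3f + 0.05f;                 // true 0.3 m/s^2 plus a 0.05 bias
		float acc = acc_meas - acc_bias;               // bias-corrected acceleration

		/* prediction: constant-acceleration kinematics */
		est[0] += est[1] * dt + acc * dt * dt / 2.0f;
		est[1] += acc * dt;

		/* correction: residual against a reference measurement (fake "baro") */
		float meas = 0.5f * 0.3f * t * t;              // position of the true motion
		float corr = meas - est[0];
		est[0] += corr * w * dt;                       // position correction
		est[1] += corr * w * w * dt;                   // velocity correction
		acc_bias -= corr * w * w * w_acc_bias * dt;    // bias learns from the residual
	}

	printf("pos %.2f  vel %.2f  bias %.3f\n", est[0], est[1], acc_bias);
	return 0;
}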

Code walkthrough

1. Variable initialization.

float z_est[2] = { 0.0f, 0.0f }; // z-axis altitude and vertical velocity
float acc[] = { 0.0f, 0.0f, 0.0f }; // acceleration in the earth (NED) frame
float acc_bias[] = { 0.0f, 0.0f, 0.0f }; // accelerometer bias in the body frame

float corr_baro = 0.0f;		// D
float corr_gps[3][2] = {
	{ 0.0f, 0.0f },		// N (pos, vel)
	{ 0.0f, 0.0f },		// E (pos, vel)
	{ 0.0f, 0.0f },		// D (pos, vel)
};
float corr_vision[3][2] = {
	{ 0.0f, 0.0f },		// N (pos, vel)
	{ 0.0f, 0.0f },		// E (pos, vel)
	{ 0.0f, 0.0f },		// D (pos, vel)
};
float corr_mocap[3][1] = {
	{ 0.0f },		// N (pos)
	{ 0.0f },		// E (pos)
	{ 0.0f },		// D (pos)
};
float corr_lidar = 0.0f;	// reportedly fed by an ultrasonic rangefinder in practice
float corr_flow[] = { 0.0f, 0.0f };	// N E

bool gps_valid = false;			// GPS is valid
bool lidar_valid = false;		// lidar is valid
bool flow_valid = false;		// flow is valid
bool flow_accurate = false;		// flow should be accurate (this flag not updated if flow_valid == false)
bool vision_valid = false;		// vision is valid
bool mocap_valid = false;		// mocap is valid

2. Compute the zero offset of the barometer altitude, essentially by averaging 200 samples.

baro_offset += sensor.baro_alt_meter;	// accumulated over the first 200 samples
baro_offset /= (float) baro_init_cnt;	// divided once after the last sample
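As a standalone illustration of this averaging (the 200-sample count comes from the module; the fake constant reading is the only assumption):

#include <stdio.h>

int main()
{
	const int baro_init_cnt = 200;   // number of samples used for the zero offset
	float baro_offset = 0.0f;

	for (int i = 0; i < baro_init_cnt; i++) {
		float baro_alt_meter = 102.3f;   // fake constant baro reading [m]
		baro_offset += baro_alt_meter;   // accumulate
	}

	baro_offset /= (float)baro_init_cnt;     // divide once after the last sample

	printf("baro_offset = %.2f m\n", baro_offset);
	return 0;
}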

3. Each sensor computes its own correction terms and weights.

corr_baro = baro_offset - sensor.baro_alt_meter[0] - z_est[0];
corr_lidar = lidar_offset - dist_ground - z_est[0];
corr_flow[0] = flow_v[0] - x_est[1]; /* velocity correction */
corr_flow[1] = flow_v[1] - y_est[1];
corr_vision[0][0] = vision.x - x_est[0]; /* calculate correction for position */
corr_vision[1][0] = vision.y - y_est[0];
corr_vision[2][0] = vision.z - z_est[0];
corr_vision[0][1] = vision.vx - x_est[1]; /* calculate correction for velocity */
corr_vision[1][1] = vision.vy - y_est[1];
corr_vision[2][1] = vision.vz - z_est[1];
corr_mocap[0][0] = mocap.x - x_est[0]; /* calculate correction for position */
corr_mocap[1][0] = mocap.y - y_est[0];
corr_mocap[2][0] = mocap.z - z_est[0];
corr_gps[0][0] = gps_proj[0] - est_buf[est_i][0][0]; /* calculate correction for position */
corr_gps[1][0] = gps_proj[1] - est_buf[est_i][1][0];
corr_gps[2][0] = local_pos.ref_alt - alt - est_buf[est_i][2][0];
corr_gps[0][1] = gps.vel_n_m_s - est_buf[est_i][0][1];/* calculate correction for velocity */
corr_gps[1][1] = gps.vel_e_m_s - est_buf[est_i][1][1];
corr_gps[2][1] = gps.vel_d_m_s - est_buf[est_i][2][1];
w_gps_xy = min_eph_epv / fmaxf(min_eph_epv, gps.eph);
w_gps_z = min_eph_epv / fmaxf(min_eph_epv, gps.epv);
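As a worked example of the GPS weighting (numbers illustrative): with min_eph_epv = 2.0 m and a reported gps.eph of 5.0 m, w_gps_xy = 2.0 / 5.0 = 0.4, so a noisier fix scales all of the GPS position and velocity weights down proportionally; whenever eph is better than min_eph_epv the ratio saturates at 1.0.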

4. Check whether each sensor's data has timed out.

if ((flow_valid || lidar_valid) && t > (flow_time + flow_topic_timeout))
if (gps_valid && (t > (gps.timestamp_position + gps_topic_timeout)))
if (vision_valid && (t > (vision.timestamp_boot + vision_topic_timeout)))
if (mocap_valid && (t > (mocap.timestamp_boot + mocap_topic_timeout)))
if (lidar_valid && (t > (lidar_time + lidar_timeout)))

5. Decide which sensors to use.

/* use GPS if it's valid and reference position initialized */
bool use_gps_xy = ref_inited && gps_valid && params.w_xy_gps_p > MIN_VALID_W;
bool use_gps_z = ref_inited && gps_valid && params.w_z_gps_p > MIN_VALID_W;
/* use VISION if it's valid and has a valid weight parameter */
bool use_vision_xy = vision_valid && params.w_xy_vision_p > MIN_VALID_W;
bool use_vision_z = vision_valid && params.w_z_vision_p > MIN_VALID_W;
/* use MOCAP if it's valid and has a valid weight parameter */
bool use_mocap = mocap_valid && params.w_mocap_p > MIN_VALID_W && params.att_ext_hdg_m == mocap_heading; //check if external heading is mocap
if (params.disable_mocap) { //disable mocap if fake gps is used
	use_mocap = false;
}
/* use flow if it's valid and (accurate or no GPS available) */
bool use_flow = flow_valid && (flow_accurate || !use_gps_xy);
/* use LIDAR if it's valid and lidar altitude estimation is enabled */
use_lidar = lidar_valid && params.enable_lidar_alt_est;

6. Compute the weights.

float flow_q = flow.quality / 255.0f;
float flow_q_weight = (flow_q - params.flow_q_min) / (1.0f - params.flow_q_min);
w_flow = PX4_R(att.R, 2, 2) * flow_q_weight / fmaxf(1.0f, flow_dist);
if (!flow_accurate) {
	w_flow *= 0.05f;
}

float w_xy_gps_p = params.w_xy_gps_p * w_gps_xy;
float w_xy_gps_v = params.w_xy_gps_v * w_gps_xy;
float w_z_gps_p = params.w_z_gps_p * w_gps_z;
float w_z_gps_v = params.w_z_gps_v * w_gps_z;

float w_xy_vision_p = params.w_xy_vision_p;
float w_xy_vision_v = params.w_xy_vision_v;
float w_z_vision_p = params.w_z_vision_p;

float w_mocap_p = params.w_mocap_p;
/* reduce GPS weight if optical flow is good */
if (use_flow && flow_accurate) {
	w_xy_gps_p *= params.w_gps_flow;
	w_xy_gps_v *= params.w_gps_flow;
}
/* baro offset correction */
if (use_gps_z) {
	float offs_corr = corr_gps[2][0] * w_z_gps_p * dt;
	baro_offset += offs_corr;
	corr_baro += offs_corr;
}
/* accelerometer bias correction for GPS (use buffered rotation matrix) */
float accel_bias_corr[3] = { 0.0f, 0.0f, 0.0f };

7. Compute the accelerometer bias correction from the sensors in use.

		if (use_gps_xy) {
			accel_bias_corr[0] -= corr_gps[0][0] * w_xy_gps_p * w_xy_gps_p;
			accel_bias_corr[0] -= corr_gps[0][1] * w_xy_gps_v;
			accel_bias_corr[1] -= corr_gps[1][0] * w_xy_gps_p * w_xy_gps_p;
			accel_bias_corr[1] -= corr_gps[1][1] * w_xy_gps_v;
		}

		if (use_gps_z) {
			accel_bias_corr[2] -= corr_gps[2][0] * w_z_gps_p * w_z_gps_p;
			accel_bias_corr[2] -= corr_gps[2][1] * w_z_gps_v;
		}

		/* transform error vector from NED frame to body frame */
		for (int i = 0; i < 3; i++) {
			float c = 0.0f;

			for (int j = 0; j < 3; j++) {
				c += R_gps[j][i] * accel_bias_corr[j];
			}

			if (PX4_ISFINITE(c)) {
				acc_bias[i] += c * params.w_acc_bias * dt;
			}
		}

		/* accelerometer bias correction for VISION (use buffered rotation matrix) */
		accel_bias_corr[0] = 0.0f;
		accel_bias_corr[1] = 0.0f;
		accel_bias_corr[2] = 0.0f;

		if (use_vision_xy) {
			accel_bias_corr[0] -= corr_vision[0][0] * w_xy_vision_p * w_xy_vision_p;
			accel_bias_corr[0] -= corr_vision[0][1] * w_xy_vision_v;
			accel_bias_corr[1] -= corr_vision[1][0] * w_xy_vision_p * w_xy_vision_p;
			accel_bias_corr[1] -= corr_vision[1][1] * w_xy_vision_v;
		}

		if (use_vision_z) {
			accel_bias_corr[2] -= corr_vision[2][0] * w_z_vision_p * w_z_vision_p;
		}

		/* accelerometer bias correction for MOCAP (use buffered rotation matrix) */
		accel_bias_corr[0] = 0.0f;
		accel_bias_corr[1] = 0.0f;
		accel_bias_corr[2] = 0.0f;

		if (use_mocap) {
			accel_bias_corr[0] -= corr_mocap[0][0] * w_mocap_p * w_mocap_p;
			accel_bias_corr[1] -= corr_mocap[1][0] * w_mocap_p * w_mocap_p;
			accel_bias_corr[2] -= corr_mocap[2][0] * w_mocap_p * w_mocap_p;
		}

		/* transform error vector from NED frame to body frame */
		for (int i = 0; i < 3; i++) {
			float c = 0.0f;

			for (int j = 0; j < 3; j++) {
				c += PX4_R(att.R, j, i) * accel_bias_corr[j];
			}

			if (PX4_ISFINITE(c)) {
				acc_bias[i] += c * params.w_acc_bias * dt;
			}
		}

		/* accelerometer bias correction for flow and baro (assume that there is no delay) */
		accel_bias_corr[0] = 0.0f;
		accel_bias_corr[1] = 0.0f;
		accel_bias_corr[2] = 0.0f;

		if (use_flow) {
			accel_bias_corr[0] -= corr_flow[0] * params.w_xy_flow;
			accel_bias_corr[1] -= corr_flow[1] * params.w_xy_flow;
		}

		if (use_lidar) {
			accel_bias_corr[2] -= corr_lidar * params.w_z_lidar * params.w_z_lidar;
		} else {
			accel_bias_corr[2] -= corr_baro * params.w_z_baro * params.w_z_baro;
		}

		/* transform error vector from NED frame to body frame */
		for (int i = 0; i < 3; i++) {
			float c = 0.0f;

			for (int j = 0; j < 3; j++) {
				c += PX4_R(att.R, j, i) * accel_bias_corr[j];
			}

			if (PX4_ISFINITE(c)) {
				acc_bias[i] += c * params.w_acc_bias * dt;
			}
		}
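Note that the loops above index the rotation matrix as R[j][i] (or PX4_R(att.R, j, i)), i.e. they multiply by the transpose of the body-to-NED rotation matrix, which is what takes the error vector from NED back into the body frame. A generic sketch of that operation (the yaw-only rotation matrix and the error vector are made-up values, not anything from the module):

#include <math.h>
#include <stdio.h>

int main()
{
	/* body-to-NED rotation for a pure yaw of 0.5 rad (illustrative) */
	float yaw = 0.5f;
	float R[3][3] = {
		{ cosf(yaw), -sinf(yaw), 0.0f },
		{ sinf(yaw),  cosf(yaw), 0.0f },
		{ 0.0f,       0.0f,      1.0f },
	};

	float ned_err[3] = { 1.0f, 0.0f, 0.0f };   /* error vector expressed in NED */
	float body_err[3] = { 0.0f, 0.0f, 0.0f };

	/* body = R^T * ned: note the swapped indices, exactly as in the module */
	for (int i = 0; i < 3; i++) {
		for (int j = 0; j < 3; j++) {
			body_err[i] += R[j][i] * ned_err[j];
		}
	}

	printf("body-frame error: %.3f %.3f %.3f\n", body_err[0], body_err[1], body_err[2]);
	return 0;
}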

8. Predict the position.

/* inertial filter prediction for altitude */
inertial_filter_predict(dt, z_est, acc[2]);

/* inertial filter prediction for position */
if (can_estimate_xy) {
	inertial_filter_predict(dt, x_est, acc[0]);
	inertial_filter_predict(dt, y_est, acc[1]);
}

Function walkthrough

x_est, y_est and z_est are passed in as the parameter float x[2]. Since an array argument decays to a pointer in C, the function writes directly into the caller's x_est/y_est/z_est, so the updated values come straight back out; there is nothing unusual about the syntax. The body is plain constant-acceleration kinematics over one time step: position integrates velocity plus acc * dt^2 / 2, and velocity integrates acceleration.

void inertial_filter_predict(float dt, float x[2], float acc)
{
	if (isfinite(dt)) {
		if (!isfinite(acc)) {
			acc = 0.0f;
		}
		x[0] += x[1] * dt + acc * dt * dt / 2.0f;
		x[1] += acc * dt;
	}
}

9. Correct the position.

/* inertial filter correction for altitude */
if (use_lidar) {
	inertial_filter_correct(corr_lidar, dt, z_est, 0, params.w_z_lidar);
} else {
	inertial_filter_correct(corr_baro, dt, z_est, 0, params.w_z_baro);
}
if (use_gps_z) {
	epv = fminf(epv, gps.epv);
	inertial_filter_correct(corr_gps[2][0], dt, z_est, 0, w_z_gps_p);
	inertial_filter_correct(corr_gps[2][1], dt, z_est, 1, w_z_gps_v);
}
if (use_vision_z) {
	epv = fminf(epv, epv_vision);
	inertial_filter_correct(corr_vision[2][0], dt, z_est, 0, w_z_vision_p);
}
if (use_mocap) {
	epv = fminf(epv, epv_mocap);
	inertial_filter_correct(corr_mocap[2][0], dt, z_est, 0, w_mocap_p);
}
if (can_estimate_xy) {
	/* inertial filter correction for position */
	if (use_flow) {
		eph = fminf(eph, eph_flow);
		inertial_filter_correct(corr_flow[0], dt, x_est, 1, params.w_xy_flow * w_flow);
		inertial_filter_correct(corr_flow[1], dt, y_est, 1, params.w_xy_flow * w_flow);
	}
	if (use_gps_xy) {
		eph = fminf(eph, gps.eph);
		inertial_filter_correct(corr_gps[0][0], dt, x_est, 0, w_xy_gps_p);
		inertial_filter_correct(corr_gps[1][0], dt, y_est, 0, w_xy_gps_p);
		if (gps.vel_ned_valid && t < gps.timestamp_velocity + gps_topic_timeout) {
			inertial_filter_correct(corr_gps[0][1], dt, x_est, 1, w_xy_gps_v);
			inertial_filter_correct(corr_gps[1][1], dt, y_est, 1, w_xy_gps_v);
		}
	}
	if (use_vision_xy) {
		eph = fminf(eph, eph_vision);
		inertial_filter_correct(corr_vision[0][0], dt, x_est, 0, w_xy_vision_p);
		inertial_filter_correct(corr_vision[1][0], dt, y_est, 0, w_xy_vision_p);
		if (w_xy_vision_v > MIN_VALID_W) {
			inertial_filter_correct(corr_vision[0][1], dt, x_est, 1, w_xy_vision_v);
			inertial_filter_correct(corr_vision[1][1], dt, y_est, 1, w_xy_vision_v);
		}
	}
	if (use_mocap) {
		eph = fminf(eph, eph_mocap);
		inertial_filter_correct(corr_mocap[0][0], dt, x_est, 0, w_mocap_p);
		inertial_filter_correct(corr_mocap[1][0], dt, y_est, 0, w_mocap_p);
	}
}
/* run terrain estimator */
terrain_estimator.predict(dt, &att, &sensor, &lidar);

Function walkthrough

e is the correction term; dt is the time step; x[2] is a two-element array where x[0] is position and x[1] is velocity; i selects what is corrected (0 for position, 1 for velocity); w is the weight.
As with the prediction function, x_est, y_est and z_est are passed in as float x[2] and modified in place through the decayed pointer. Note that when a position correction is applied (i == 0), the velocity is also nudged by w * ewdt = e * w^2 * dt, so a persistent position residual pulls the velocity estimate along with it.

void inertial_filter_correct(float e, float dt, float x[2], int i, float w)
{
	if (isfinite(e) && isfinite(w) && isfinite(dt)) {
		float ewdt = e * w * dt;
		x[i] += ewdt;

		if (i == 0) {
			x[1] += w * ewdt;
		}
	}
}
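As a small worked example (values illustrative): with e = 0.5 m, w = 1.0 and dt = 0.02 s, a position correction (i = 0) adds ewdt = 0.01 m to the position and w * ewdt = 0.01 m/s to the velocity, so both states move toward the measurement together.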

10. Publish the results.

			/* publish local position */
			local_pos.xy_valid = can_estimate_xy;
			local_pos.v_xy_valid = can_estimate_xy;
			local_pos.xy_global = local_pos.xy_valid && use_gps_xy;
			local_pos.z_global = local_pos.z_valid && use_gps_z;
			local_pos.x = x_est[0];
			local_pos.vx = x_est[1];
			local_pos.y = y_est[0];
			local_pos.vy = y_est[1];
			local_pos.z = z_est[0];
			local_pos.vz = z_est[1];
			local_pos.yaw = att.yaw;
			local_pos.dist_bottom_valid = dist_bottom_valid;
			local_pos.eph = eph;
			local_pos.epv = epv;

			if (local_pos.dist_bottom_valid) {
				local_pos.dist_bottom = dist_ground;
				local_pos.dist_bottom_rate = - z_est[1];
			}

			local_pos.timestamp = t;

			orb_publish(ORB_ID(vehicle_local_position), vehicle_local_position_pub, &local_pos);

			if (local_pos.xy_global && local_pos.z_global) {
				/* publish global position */
				global_pos.timestamp = t;
				global_pos.time_utc_usec = gps.time_utc_usec;

				double est_lat, est_lon;
				map_projection_reproject(&ref, local_pos.x, local_pos.y, &est_lat, &est_lon);

				global_pos.lat = est_lat;
				global_pos.lon = est_lon;
				global_pos.alt = local_pos.ref_alt - local_pos.z;

				global_pos.vel_n = local_pos.vx;
				global_pos.vel_e = local_pos.vy;
				global_pos.vel_d = local_pos.vz;

				global_pos.yaw = local_pos.yaw;

				global_pos.eph = eph;
				global_pos.epv = epv;

				if (terrain_estimator.is_valid()) {
					global_pos.terrain_alt = global_pos.alt - terrain_estimator.get_distance_to_ground();
					global_pos.terrain_alt_valid = true;

				} else {
					global_pos.terrain_alt_valid = false;
				}

				global_pos.pressure_alt = sensor.baro_alt_meter[0];

				if (vehicle_global_position_pub == NULL) {
					vehicle_global_position_pub = orb_advertise(ORB_ID(vehicle_global_position), &global_pos);

				} else {
					orb_publish(ORB_ID(vehicle_global_position), vehicle_global_position_pub, &global_pos);
				}
			}

Common sensors

Barometer + accelerometer = altitude (this part is adapted from http://blog.sina.com.cn/s/blog_8fe4f2f40102wo50.html)

1. Variable initialization.

float z_est[2] = { 0.0f, 0.0f }; // z-axis altitude and vertical velocity
float acc[] = { 0.0f, 0.0f, 0.0f }; // acceleration in the earth (NED) frame
float acc_bias[] = { 0.0f, 0.0f, 0.0f }; // accelerometer bias in the body frame
float corr_baro = 0.0f; // barometer correction term

2. Compute the zero offset of the barometer altitude, by averaging 200 samples.

baro_offset += sensor.baro_alt_meter;
baro_offset /= (float) baro_init_cnt;

3. Transform the body-frame accelerometer data into the earth frame.

First subtract the estimated bias from the accelerometer data:

sensor.accelerometer_m_s2[0] -= acc_bias[0];
sensor.accelerometer_m_s2[1] -= acc_bias[1];
sensor.accelerometer_m_s2[2] -= acc_bias[2];

Then rotate into the earth frame:

acc[i] += PX4_R(att.R, i, j) * sensor.accelerometer_m_s2[j];

The earth-frame z acceleration still contains gravity, so it is compensated:

acc[2] += CONSTANTS_ONE_G;

4. Compute the barometer correction term

corr_baro = baro_offset - sensor.baro_alt_meter - z_est[0];

5. Accelerometer bias correction vector

accel_bias_corr[2] -= corr_baro * params.w_z_baro * params.w_z_baro;

6. Transform the correction vector into the body frame

c += PX4_R(att.R, j, i) * accel_bias_corr[j];

acc_bias[i] += c * params.w_acc_bias * dt;

7. Integrate the acceleration to predict altitude

inertial_filter_predict(dt, z_est, acc[2]);

8. Apply the barometer correction

inertial_filter_correct(corr_baro, dt, z_est, 0, params.w_z_baro);
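Putting steps 1 through 8 together, here is a minimal self-contained sketch of the baro/accelerometer altitude filter. The vehicle is assumed level (so the rotation is the identity), and all weights and the fake sensor readings are illustrative; the two helpers mirror inertial_filter_predict() / inertial_filter_correct():

#include <stdio.h>

#define ONE_G 9.80665f

/* constant-acceleration kinematics, as in inertial_filter_predict() */
static void predict(float dt, float x[2], float acc)
{
	x[0] += x[1] * dt + acc * dt * dt / 2.0f;
	x[1] += acc * dt;
}

/* weighted residual correction, as in inertial_filter_correct() */
static void correct(float e, float dt, float x[2], int i, float w)
{
	float ewdt = e * w * dt;
	x[i] += ewdt;

	if (i == 0) {
		x[1] += w * ewdt;
	}
}

int main()
{
	float z_est[2] = { 0.0f, 0.0f };   /* NED z (down positive) and vertical velocity */
	float acc_bias_z = 0.0f;           /* body z accelerometer bias estimate */
	const float dt = 0.02f, w_z_baro = 0.5f, w_acc_bias = 0.05f;
	const float baro_offset = 100.0f;  /* step 2: pretend the 200-sample mean gave 100 m */

	for (int k = 0; k < 3000; k++) {
		/* fake sensors for a hovering vehicle: accel carries a +0.1 m/s^2 bias */
		float accel_z_body = -ONE_G + 0.1f;
		float baro_alt = 100.0f;

		/* step 3: remove bias, rotate (identity here), compensate gravity */
		float acc_z = (accel_z_body - acc_bias_z) + ONE_G;

		/* step 4: baro correction term */
		float corr_baro = baro_offset - baro_alt - z_est[0];

		/* steps 5-6: feed the residual back into the bias estimate */
		float accel_bias_corr_z = -corr_baro * w_z_baro * w_z_baro;
		acc_bias_z += accel_bias_corr_z * w_acc_bias * dt;

		/* steps 7-8: predict from acceleration, correct with the baro residual */
		predict(dt, z_est, acc_z);
		correct(corr_baro, dt, z_est, 0, w_z_baro);
	}

	printf("z = %.3f m  vz = %.3f m/s  bias = %.3f m/s^2\n", z_est[0], z_est[1], acc_bias_z);
	return 0;
}

Over the simulated minute the bias estimate moves toward the +0.1 m/s^2 injected into the fake accelerometer while the altitude stays pinned near the baro reference, which is the same mechanism that keeps the real module's integrated altitude from drifting.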

Optical flow

orb_copy(ORB_ID(optical_flow), optical_flow_sub, &flow);
if (fabs(rates_setpoint.pitch) < rate_threshold) {
	//warnx("[inav] test ohne comp");
	flow_ang[0] = (flow.pixel_flow_x_integral / (float)flow.integration_timespan * 1000000.0f) * params.flow_k;//for now the flow has to be scaled (to small)
}
else {
	//warnx("[inav] test mit comp");
	//calculate flow [rad/s] and compensate for rotations (and offset of flow-gyro)
	flow_ang[0] = ((flow.pixel_flow_x_integral - flow.gyro_x_rate_integral) / (float)flow.integration_timespan * 1000000.0f
		       + gyro_offset_filtered[0]) * params.flow_k;//for now the flow has to be scaled (to small)
}
if (fabs(rates_setpoint.roll) < rate_threshold) {
	flow_ang[1] = (flow.pixel_flow_y_integral / (float)flow.integration_timespan * 1000000.0f) * params.flow_k;//for now the flow has to be scaled (to small)
}
else {
	//calculate flow [rad/s] and compensate for rotations (and offset of flow-gyro)
	flow_ang[1] = ((flow.pixel_flow_y_integral - flow.gyro_y_rate_integral) / (float)flow.integration_timespan * 1000000.0f
		       + gyro_offset_filtered[1]) * params.flow_k;//for now the flow has to be scaled (to small)
}

This yields flow_ang[].

float dist_bottom = lidar.current_distance;
float flow_dist = dist_bottom; 

So the distance used to scale the optical flow comes from the lidar topic, which in practice is often an ultrasonic rangefinder.

gyro_offset_filtered[0] = flow_gyrospeed_filtered[0] - att_gyrospeed_filtered[0];
gyro_offset_filtered[1] = flow_gyrospeed_filtered[1] - att_gyrospeed_filtered[1];
gyro_offset_filtered[2] = flow_gyrospeed_filtered[2] - att_gyrospeed_filtered[2];
flow_gyrospeed[0] = flow.gyro_x_rate_integral / (float)flow.integration_timespan * 1000000.0f;
flow_gyrospeed[1] = flow.gyro_y_rate_integral / (float)flow.integration_timespan * 1000000.0f;
flow_gyrospeed[2] = flow.gyro_z_rate_integral / (float)flow.integration_timespan * 1000000.0f;
yaw_comp[0] = - params.flow_module_offset_y * (flow_gyrospeed[2] - gyro_offset_filtered[2]);
yaw_comp[1] = params.flow_module_offset_x * (flow_gyrospeed[2] - gyro_offset_filtered[2]);

PX4_R(att.R, i, j) simply expands to att.R[i * 3 + j].

if (fabs(rates_setpoint.yaw) < rate_threshold) {
	flow_m[0] = -flow_ang[0] * flow_dist;
	flow_m[1] = -flow_ang[1] * flow_dist;
} else {
	flow_m[0] = -flow_ang[0] * flow_dist - yaw_comp[0] * params.flow_k;
	flow_m[1] = -flow_ang[1] * flow_dist - yaw_comp[1] * params.flow_k;
}

This yields the optical-flow measurement vector flow_m[]. The yaw_comp[] terms remove the apparent translation that a yaw rotation induces when the flow module is mounted at an offset (params.flow_module_offset_x/y) from the vehicle's center of rotation.

flow_m[2] = z_est[1];
float flow_v[2] = { 0.0f, 0.0f };
for (int i = 0; i < 2; i++) {
	for (int j = 0; j < 3; j++) {
		flow_v[i] += PX4_R(att.R, i, j) * flow_m[j];
	}
}
corr_flow[0] = flow_v[0] - x_est[1];
corr_flow[1] = flow_v[1] - y_est[1];

This yields corr_flow[].

accel_bias_corr[0] -= corr_flow[0] * params.w_xy_flow;
accel_bias_corr[1] -= corr_flow[1] * params.w_xy_flow;

This yields accel_bias_corr[].

Recall that PX4_R(att.R, i, j) expands to att.R[i * 3 + j].

/* transform error vector from NED frame to body frame */
for (int i = 0; i < 3; i++) {
	float c = 0.0f;
	for (int j = 0; j < 3; j++) {
		c += PX4_R(att.R, j, i) * accel_bias_corr[j];
	}
	if (PX4_ISFINITE(c)) {
		acc_bias[i] += c * params.w_acc_bias * dt;
	}
}

This yields acc_bias[].

inertial_filter_predict(dt, x_est, acc[0]);
inertial_filter_predict(dt, y_est, acc[1]);

This yields x_est and y_est.

inertial_filter_correct(corr_flow[0], dt, x_est, 1, params.w_xy_flow * w_flow);
inertial_filter_correct(corr_flow[1], dt, y_est, 1, params.w_xy_flow * w_flow);

This yields the corrected x_est and y_est.
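The geometry behind these flow equations is simply that, for a downward-facing sensor, metric ground velocity equals the rotation-compensated angular flow rate multiplied by the height above ground, with a sign flip as in flow_m[] above. A toy illustration with made-up numbers:

#include <stdio.h>

int main()
{
	/* illustrative: flow sensor 2 m above ground, gyro-compensated flow 0.1 rad/s */
	float flow_ang = 0.10f;    /* [rad/s] angular flow after rotation compensation */
	float flow_dist = 2.0f;    /* [m] height above ground from the lidar/sonar */

	float flow_m = -flow_ang * flow_dist;   /* body-frame velocity, sign as in flow_m[] */

	printf("body-frame velocity estimate: %.2f m/s\n", flow_m);
	return 0;
}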

GPS

orb_copy(ORB_ID(vehicle_gps_position), vehicle_gps_position_sub, &gps);
float alt = gps.alt * 1e-3;
local_pos.ref_alt = alt + z_est[0];
est_buf[buf_ptr][0][0] = x_est[0];
est_buf[buf_ptr][0][1] = x_est[1];
est_buf[buf_ptr][1][0] = y_est[0];
est_buf[buf_ptr][1][1] = y_est[1];
est_buf[buf_ptr][2][0] = z_est[0];
est_buf[buf_ptr][2][1] = z_est[1];
map_projection_project(&ref, lat, lon, &(gps_proj[0]), &(gps_proj[1]));
corr_gps[0][0] = gps_proj[0] - est_buf[est_i][0][0];
corr_gps[1][0] = gps_proj[1] - est_buf[est_i][1][0];
corr_gps[2][0] = local_pos.ref_alt - alt - est_buf[est_i][2][0];
corr_gps[0][1] = gps.vel_n_m_s - est_buf[est_i][0][1];
corr_gps[1][1] = gps.vel_e_m_s - est_buf[est_i][1][1];
corr_gps[2][1] = gps.vel_d_m_s - est_buf[est_i][2][1];

This yields corr_gps[][].

bool use_gps_xy = ref_inited && gps_valid && params.w_xy_gps_p > MIN_VALID_W;
accel_bias_corr[0] -= corr_gps[0][0] * w_xy_gps_p * w_xy_gps_p;
accel_bias_corr[0] -= corr_gps[0][1] * w_xy_gps_v;
accel_bias_corr[1] -= corr_gps[1][0] * w_xy_gps_p * w_xy_gps_p;
accel_bias_corr[1] -= corr_gps[1][1] * w_xy_gps_v;
accel_bias_corr[2] -= corr_gps[2][0] * w_z_gps_p * w_z_gps_p;
accel_bias_corr[2] -= corr_gps[2][1] * w_z_gps_v;

This yields accel_bias_corr[].

/* push current rotation matrix to buffer */
memcpy(R_buf[buf_ptr], att.R, sizeof(att.R));
/* save rotation matrix at this moment */
memcpy(R_gps, R_buf[est_i], sizeof(R_gps));
/* transform error vector from NED frame to body frame */
for (int i = 0; i < 3; i++) {
	float c = 0.0f;
	for (int j = 0; j < 3; j++) {
		c += R_gps[j][i] * accel_bias_corr[j];
	}
	if (PX4_ISFINITE(c)) {
		acc_bias[i] += c * params.w_acc_bias * dt;
	}
}

This yields acc_bias[].

// corr_gps[a][b]: a = 0 is the N (x) axis, a = 1 is E (y), a = 2 is D (z);
// b = 0 is the position correction, b = 1 is the velocity correction
inertial_filter_predict(dt, z_est, acc[2]);

This yields z_est.

inertial_filter_correct(corr_gps[2][0], dt, z_est, 0, w_z_gps_p);
inertial_filter_correct(corr_gps[2][1], dt, z_est, 1, w_z_gps_v);

This yields the corrected z_est.

inertial_filter_predict(dt, x_est, acc[0]);
inertial_filter_predict(dt, y_est, acc[1]);
inertial_filter_correct(corr_gps[0][0], dt, x_est, 0, w_xy_gps_p);
inertial_filter_correct(corr_gps[1][0], dt, y_est, 0, w_xy_gps_p);
inertial_filter_correct(corr_gps[0][1], dt, x_est, 1, w_xy_gps_v);
inertial_filter_correct(corr_gps[1][1], dt, y_est, 1, w_xy_gps_v);
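One detail worth calling out: the GPS residuals above are formed against est_buf[est_i] and the buffered rotation matrix R_gps rather than against the freshest estimates, because a GPS fix describes where the vehicle was some latency ago. A sketch of how such a delay-compensating ring buffer works (buffer size, update period and the 0.2 s latency are illustrative, not the module's exact values):

#include <stdio.h>

#define EST_BUF_SIZE 250

int main()
{
	const float dt = 0.004f;        /* illustrative estimator period [s] */
	const float gps_delay = 0.2f;   /* illustrative GPS latency [s] */

	float est_buf[EST_BUF_SIZE][3][2] = {};   /* past N/E/D position + velocity estimates */
	int buf_ptr = 0;
	int est_i = 0;

	for (int step = 0; step < 1000; step++) {
		/* pretend the current N position estimate is 1 m/s times elapsed time */
		est_buf[buf_ptr][0][0] = 1.0f * step * dt;

		/* index of the estimate that is roughly gps_delay seconds old */
		int delay_steps = (int)(gps_delay / dt);

		if (delay_steps > EST_BUF_SIZE - 1) {
			delay_steps = EST_BUF_SIZE - 1;
		}

		est_i = buf_ptr - delay_steps;

		if (est_i < 0) {
			est_i += EST_BUF_SIZE;
		}

		buf_ptr = (buf_ptr + 1) % EST_BUF_SIZE;
	}

	/* a GPS residual is formed against the delayed estimate, not the newest one */
	printf("newest N estimate: %.2f m, estimate used for GPS correction: %.2f m\n",
	       est_buf[(buf_ptr + EST_BUF_SIZE - 1) % EST_BUF_SIZE][0][0],
	       est_buf[est_i][0][0]);
	return 0;
}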
				