plc_control/src/handler/point.rs

472 lines
13 KiB
Rust

use axum::{Json, extract::{Path, Query, State}, http::HeaderMap, response::IntoResponse};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use validator::Validate;
use sqlx::{Row, QueryBuilder};
use crate::util::{response::ApiErr, pagination::{PaginatedResponse, PaginationParams}};
use crate::{
AppState,
model::{Node, Point},
};
/// Query parameters for the point-list endpoint: an optional source
/// filter plus standard pagination fields flattened into the query string.
#[derive(Deserialize, Validate)]
pub struct GetPointListQuery {
    // When set, only points belonging to this source are returned.
    pub source_id: Option<Uuid>,
    #[serde(flatten)]
    pub pagination: PaginationParams,
}
/// A point joined with its live monitor snapshot; the point is serialized
/// flat so its fields appear at the top level of the JSON object.
#[derive(Serialize)]
pub struct PointWithMonitor {
    #[serde(flatten)]
    pub point: Point,
    // `None` when the connection manager holds no monitor data for this point.
    pub point_monitor: Option<crate::telemetry::PointMonitorInfo>,
}
/// List points with pagination and an optional source filter, attaching
/// live monitor data to each point where available.
pub async fn get_point_list(
    State(state): State<AppState>,
    Query(query): Query<GetPointListQuery>,
) -> Result<impl IntoResponse, ApiErr> {
    query.validate()?;
    let pool = &state.pool;

    // Total row count for the pagination envelope.
    let total = crate::service::get_points_count(pool, query.source_id).await?;

    // Current page of points.
    let points = crate::service::get_points_paginated(
        pool,
        query.source_id,
        query.pagination.page_size,
        query.pagination.offset(),
    )
    .await?;

    // Attach in-memory monitor info under a single read guard.
    let guard = state
        .connection_manager
        .get_point_monitor_data_read_guard()
        .await;
    let mut data = Vec::with_capacity(points.len());
    for point in points {
        let point_monitor = guard.get(&point.id).cloned();
        data.push(PointWithMonitor { point, point_monitor });
    }

    Ok(Json(PaginatedResponse::new(
        data,
        total,
        query.pagination.page,
        query.pagination.page_size,
    )))
}
/// Get a point by id.
pub async fn get_point(
    State(state): State<AppState>,
    Path(point_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiErr> {
    // Delegate the lookup to the service layer and return the row as JSON.
    let point = crate::service::get_point_by_id(&state.pool, point_id).await?;
    Ok(Json(point))
}
/// Request payload for updating editable point fields.
/// All fields are optional; only the supplied ones are written.
#[derive(Deserialize, Validate)]
pub struct UpdatePointReq {
    pub name: Option<String>,
    pub description: Option<String>,
    pub unit: Option<String>,
    // Tag to assign; the handler verifies the tag exists before writing.
    pub tag_id: Option<Uuid>,
}
/// Request payload for batch setting point tags.
#[derive(Deserialize, Validate)]
pub struct BatchSetPointTagsReq {
    // Points whose tag should be changed; must be non-empty.
    pub point_ids: Vec<Uuid>,
    // Tag to assign to every point; `None` clears the tag (writes NULL).
    pub tag_id: Option<Uuid>,
}
/// Update point metadata (name/description/unit/tag_id).
///
/// Returns 404 when the point, or a referenced tag, does not exist.
/// A payload with no fields set is accepted and treated as a no-op.
pub async fn update_point(
    State(state): State<AppState>,
    Path(point_id): Path<Uuid>,
    Json(payload): Json<UpdatePointReq>,
) -> Result<impl IntoResponse, ApiErr> {
    payload.validate()?;
    let pool = &state.pool;
    // Nothing to do: short-circuit before touching the database.
    if payload.name.is_none()
        && payload.description.is_none()
        && payload.unit.is_none()
        && payload.tag_id.is_none()
    {
        return Ok(Json(serde_json::json!({"ok_msg": "No fields to update"})));
    }
    // If tag_id is provided, ensure the tag exists before referencing it.
    if let Some(tag_id) = payload.tag_id {
        let tag_exists = sqlx::query(r#"SELECT 1 FROM tag WHERE id = $1"#)
            .bind(tag_id)
            .fetch_optional(pool)
            .await?
            .is_some();
        if !tag_exists {
            return Err(ApiErr::NotFound("Tag not found".to_string(), None));
        }
    }
    // Ensure target point exists.
    let existing_point = sqlx::query_as::<_, Point>(r#"SELECT * FROM point WHERE id = $1"#)
        .bind(point_id)
        .fetch_optional(pool)
        .await?;
    if existing_point.is_none() {
        return Err(ApiErr::NotFound("Point not found".to_string(), None));
    }
    // Build the SET clause from whichever fields were supplied.
    // BUG FIX: `Separated::push_bind` inserts the ", " separator *before*
    // the placeholder, which produced invalid SQL like `SET name = , $1`.
    // `push_bind_unseparated` appends the placeholder directly after the
    // `column = ` fragment pushed immediately before it.
    let mut qb = QueryBuilder::new("UPDATE point SET ");
    let mut sep = qb.separated(", ");
    if let Some(name) = &payload.name {
        sep.push("name = ").push_bind_unseparated(name);
    }
    if let Some(description) = &payload.description {
        sep.push("description = ").push_bind_unseparated(description);
    }
    if let Some(unit) = &payload.unit {
        sep.push("unit = ").push_bind_unseparated(unit);
    }
    if let Some(tag_id) = &payload.tag_id {
        sep.push("tag_id = ").push_bind_unseparated(tag_id);
    }
    sep.push("updated_at = NOW()");
    qb.push(" WHERE id = ").push_bind(point_id);
    qb.build().execute(pool).await?;
    Ok(Json(serde_json::json!({"ok_msg": "Point updated successfully"})))
}
/// Batch set (or clear) the tag on a set of points.
pub async fn batch_set_point_tags(
    State(state): State<AppState>,
    Json(payload): Json<BatchSetPointTagsReq>,
) -> Result<impl IntoResponse, ApiErr> {
    payload.validate()?;
    if payload.point_ids.is_empty() {
        return Err(ApiErr::BadRequest("point_ids cannot be empty".to_string(), None));
    }
    let pool = &state.pool;

    // When a tag is supplied, it must exist before we reference it.
    if let Some(tag_id) = payload.tag_id {
        let found = sqlx::query(r#"SELECT 1 FROM tag WHERE id = $1"#)
            .bind(tag_id)
            .fetch_optional(pool)
            .await?;
        if found.is_none() {
            return Err(ApiErr::NotFound("Tag not found".to_string(), None));
        }
    }

    // Keep only the ids that actually exist in the point table.
    let rows = sqlx::query(r#"SELECT id FROM point WHERE id = ANY($1)"#)
        .bind(&payload.point_ids)
        .fetch_all(pool)
        .await?;
    let existing: Vec<Uuid> = rows.iter().map(|row| row.get::<Uuid, _>("id")).collect();
    if existing.is_empty() {
        return Err(ApiErr::NotFound("No valid points found".to_string(), None));
    }

    // Apply the tag (NULL when `tag_id` is absent) to every surviving point.
    let result =
        sqlx::query(r#"UPDATE point SET tag_id = $1, updated_at = NOW() WHERE id = ANY($2)"#)
            .bind(payload.tag_id)
            .bind(&existing)
            .execute(pool)
            .await?;

    Ok(Json(serde_json::json!({
        "ok_msg": "Point tags updated successfully",
        "updated_count": result.rows_affected()
    })))
}
/// Delete one point by id.
///
/// Returns 404 when the point does not exist; on success a
/// `PointDeleteBatch` reload event is emitted for the point's source.
pub async fn delete_point(
    State(state): State<AppState>,
    Path(point_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiErr> {
    let pool = &state.pool;
    // Ensure the target point exists before doing any further work
    // (the existence check now precedes the source lookup so a missing
    // point costs a single query).
    let existing_point = sqlx::query_as::<_, Point>(r#"SELECT * FROM point WHERE id = $1"#)
        .bind(point_id)
        .fetch_optional(pool)
        .await?;
    if existing_point.is_none() {
        return Err(ApiErr::NotFound("Point not found".to_string(), None));
    }
    // Resolve the owning source before the row disappears, so the reload
    // event can still be routed after the delete.
    let source_id = crate::service::get_points_grouped_by_source(pool, &[point_id])
        .await?
        .keys()
        .next()
        .copied();
    // Delete the point (SQL keywords uppercased for consistency with the
    // rest of this module).
    sqlx::query(r#"DELETE FROM point WHERE id = $1"#)
        .bind(point_id)
        .execute(pool)
        .await?;
    if let Some(source_id) = source_id {
        if let Err(e) = state
            .event_manager
            .send(crate::event::ReloadEvent::PointDeleteBatch {
                source_id,
                point_ids: vec![point_id],
            })
        {
            tracing::error!("Failed to send PointDeleteBatch event: {}", e);
        }
    }
    Ok(Json(serde_json::json!({"ok_msg": "Point deleted successfully"})))
}
/// Request payload for batch point creation from node ids.
#[derive(Deserialize, Validate)]
pub struct BatchCreatePointsReq {
    // Nodes to create points for; must be non-empty. Nonexistent nodes are
    // reported back as failures, nodes that already have a point are skipped.
    pub node_ids: Vec<Uuid>,
}
/// Response payload for batch point creation.
#[derive(Serialize)]
pub struct BatchCreatePointsRes {
    // Number of points actually inserted.
    pub success_count: usize,
    // Number of node ids that could not be resolved.
    pub failed_count: usize,
    // The node ids that could not be resolved.
    pub failed_node_ids: Vec<Uuid>,
    // Ids of the newly created points, in creation order.
    pub created_point_ids: Vec<Uuid>,
}
/// Batch create points by node ids.
///
/// Runs the whole batch inside a single transaction. Nodes that do not
/// exist are reported in `failed_node_ids`; nodes that already have a
/// point are skipped without counting as success or failure. After the
/// commit, `PointCreateBatch` reload events are emitted grouped by source.
pub async fn batch_create_points(
    State(state): State<AppState>,
    Json(payload): Json<BatchCreatePointsReq>,
) -> Result<impl IntoResponse, ApiErr> {
    payload.validate()?;
    let pool = &state.pool;
    if payload.node_ids.is_empty() {
        return Err(ApiErr::BadRequest("node_ids cannot be empty".to_string(), None));
    }
    let mut success_count = 0;
    let mut failed_count = 0;
    let mut failed_node_ids = Vec::new();
    let mut created_point_ids = Vec::new();
    // Use one transaction for the full batch.
    let mut tx = pool.begin().await?;
    for node_id in payload.node_ids {
        // Fetch the node once: a single query both checks existence and
        // supplies the browse_name used as the default point name
        // (previously a separate `SELECT 1` probe preceded the fetch).
        let node = sqlx::query_as::<_, Node>(r#"SELECT * FROM node WHERE id = $1"#)
            .bind(node_id)
            .fetch_optional(&mut *tx)
            .await?;
        let Some(node) = node else {
            failed_count += 1;
            failed_node_ids.push(node_id);
            continue;
        };
        // Skip nodes that already have a point.
        let point_exists = sqlx::query(r#"SELECT 1 FROM point WHERE node_id = $1"#)
            .bind(node_id)
            .fetch_optional(&mut *tx)
            .await?
            .is_some();
        if point_exists {
            continue;
        }
        let new_id = Uuid::new_v4();
        sqlx::query(
            r#"
            INSERT INTO point (id, node_id, name)
            VALUES ($1, $2, $3)
            "#,
        )
        .bind(new_id)
        .bind(node_id)
        .bind(&node.browse_name)
        .execute(&mut *tx)
        .await?;
        success_count += 1;
        created_point_ids.push(new_id);
    }
    // Commit the transaction.
    tx.commit().await?;
    // Emit grouped create events by source.
    if !created_point_ids.is_empty() {
        let grouped =
            crate::service::get_points_grouped_by_source(pool, &created_point_ids).await?;
        for (source_id, points) in grouped {
            let point_ids: Vec<Uuid> = points.into_iter().map(|p| p.point_id).collect();
            if let Err(e) = state
                .event_manager
                .send(crate::event::ReloadEvent::PointCreateBatch { source_id, point_ids })
            {
                tracing::error!("Failed to send PointCreateBatch event: {}", e);
            }
        }
    }
    Ok(Json(BatchCreatePointsRes {
        success_count,
        failed_count,
        failed_node_ids,
        created_point_ids,
    }))
}
/// Request payload for batch point deletion.
#[derive(Deserialize, Validate)]
pub struct BatchDeletePointsReq {
    // Points to delete; must be non-empty. Ids with no matching row are
    // ignored rather than treated as errors.
    pub point_ids: Vec<Uuid>,
}
/// Response payload for batch point deletion.
#[derive(Serialize)]
pub struct BatchDeletePointsRes {
    // Number of rows actually removed, as reported by the database.
    pub deleted_count: u64,
}
/// Batch delete points and emit grouped delete events by source.
pub async fn batch_delete_points(
    State(state): State<AppState>,
    Json(payload): Json<BatchDeletePointsReq>,
) -> Result<impl IntoResponse, ApiErr> {
    payload.validate()?;
    if payload.point_ids.is_empty() {
        return Err(ApiErr::BadRequest("point_ids cannot be empty".to_string(), None));
    }
    let pool = &state.pool;

    // Group the requested ids by their owning source; only ids present in
    // the grouping are deleted.
    let grouped = crate::service::get_points_grouped_by_source(pool, &payload.point_ids).await?;
    let mut existing_ids: Vec<Uuid> = Vec::new();
    for points in grouped.values() {
        existing_ids.extend(points.iter().map(|p| p.point_id));
    }
    if existing_ids.is_empty() {
        return Ok(Json(BatchDeletePointsRes { deleted_count: 0 }));
    }

    let deleted_count = sqlx::query(r#"DELETE FROM point WHERE id = ANY($1)"#)
        .bind(&existing_ids)
        .execute(pool)
        .await?
        .rows_affected();

    // Notify each source about the points removed from it.
    for (source_id, points) in grouped {
        let point_ids: Vec<Uuid> = points.into_iter().map(|p| p.point_id).collect();
        if let Err(e) = state
            .event_manager
            .send(crate::event::ReloadEvent::PointDeleteBatch { source_id, point_ids })
        {
            tracing::error!("Failed to send PointDeleteBatch event: {}", e);
        }
    }

    Ok(Json(BatchDeletePointsRes { deleted_count }))
}
/// Batch write values to points; gated by the `X-Write-Key` header.
pub async fn batch_set_point_value(
    State(state): State<AppState>,
    headers: HeaderMap,
    Json(payload): Json<crate::connection::BatchSetPointValueReq>,
) -> Result<impl IntoResponse, ApiErr> {
    // Extract the caller-supplied write key; absent or non-UTF-8 header
    // values degrade to the empty string (which cannot verify).
    let write_key = match headers.get("X-Write-Key").map(|v| v.to_str()) {
        Some(Ok(key)) => key,
        _ => "",
    };
    if !state.config.verify_write_key(write_key) {
        return Err(ApiErr::Forbidden(
            "write permission denied".to_string(),
            Some(serde_json::json!({
                "hint": "set WRITE_API_KEY (or legacy WRITE_KEY) and pass header X-Write-Key"
            })),
        ));
    }
    // Delegate the actual write to the connection manager.
    match state.connection_manager.write_point_values_batch(payload).await {
        Ok(result) => Ok(Json(result)),
        Err(e) => Err(ApiErr::Internal(e, None)),
    }
}