From c3037e52cf54ef2854ca484a6dea8b707e4eb02f Mon Sep 17 00:00:00 2001 From: caoqianming Date: Thu, 16 Apr 2026 15:40:28 +0800 Subject: [PATCH] docs(code): clean garbled rust comments --- .../src/control/engine.rs | 16 +++++++-------- .../src/control/simulate.rs | 20 +++++++++---------- .../src/control/validator.rs | 4 ++-- crates/app_feeder_distributor/src/event.rs | 6 +++--- .../src/handler/point.rs | 6 +++--- .../app_feeder_distributor/src/handler/tag.rs | 18 ++++++++--------- .../app_feeder_distributor/src/middleware.rs | 6 +++--- .../plc_platform_core/src/util/pagination.rs | 8 ++++---- .../plc_platform_core/src/util/validator.rs | 8 ++++---- src/control/engine.rs | 14 ++++++------- src/control/simulate.rs | 18 ++++++++--------- src/handler/point.rs | 4 ++-- src/handler/tag.rs | 16 +++++++-------- src/middleware.rs | 4 ++-- 14 files changed, 74 insertions(+), 74 deletions(-) diff --git a/crates/app_feeder_distributor/src/control/engine.rs b/crates/app_feeder_distributor/src/control/engine.rs index 9d14378..589ff11 100644 --- a/crates/app_feeder_distributor/src/control/engine.rs +++ b/crates/app_feeder_distributor/src/control/engine.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::HashMap; use std::sync::Arc; use tokio::sync::Notify; @@ -52,12 +52,12 @@ async fn supervise(state: AppState, store: Arc) { } } -// 鈹€鈹€ Per-unit task 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ +// Per-unit task. async fn unit_task(state: AppState, store: Arc, unit_id: Uuid) { let notify = store.get_or_create_notify(unit_id).await; - // Fault/comm check ticker 鈥?still need periodic polling of point monitor data. + // Fault/comm check ticker; still need periodic polling of point monitor data. 
let mut fault_tick = tokio::time::interval(Duration::from_millis(500)); fault_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); @@ -76,7 +76,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu } }; - // 鈹€鈹€ Fault / comm check 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ + // Fault / comm check. let (kind_roles, kind_eq_ids, all_roles) = match load_equipment_maps(&state, unit_id).await { Ok(maps) => maps, Err(e) => { @@ -92,7 +92,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu push_ws(&state, &runtime).await; } - // 鈹€鈹€ Wait when not active 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ + // Wait when not active. if !runtime.auto_enabled || runtime.fault_locked || runtime.comm_locked || runtime.manual_ack_required { tokio::select! { _ = fault_tick.tick() => {} @@ -106,7 +106,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu continue; } - // 鈹€鈹€ State machine step 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ + // State machine step. match runtime.state { UnitRuntimeState::Stopped => { // Wait stop_time_sec (0 = skip wait, start immediately). @@ -165,7 +165,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu runtime.display_acc_sec = runtime.accumulated_run_sec; if unit.acc_time_sec > 0 && runtime.accumulated_run_sec >= unit.acc_time_sec as i64 * 1000 { - // Accumulated threshold reached 鈥?start distributor. + // Accumulated threshold reached; start distributor. 
let monitor = state.connection_manager.get_point_monitor_data_read_guard().await; let dist_cmd = kind_roles.get("distributor").and_then(|r| find_cmd(r, "start_cmd", &monitor)); drop(monitor); @@ -223,7 +223,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu } } -// 鈹€鈹€ Helpers 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ +// Helpers. /// Sleep for the duration appropriate to the *current* state, interrupting every /// 500 ms to re-check fault/comm. Returns `true` when the full time elapsed, diff --git a/crates/app_feeder_distributor/src/control/simulate.rs b/crates/app_feeder_distributor/src/control/simulate.rs index 5c8622a..11462a3 100644 --- a/crates/app_feeder_distributor/src/control/simulate.rs +++ b/crates/app_feeder_distributor/src/control/simulate.rs @@ -1,4 +1,4 @@ -use tokio::time::Duration; +use tokio::time::Duration; use uuid::Uuid; use crate::{ @@ -20,7 +20,7 @@ async fn run(state: AppState) { let mut rng = seed_rng(); loop { - // Wait a random 15鈥?0 s between events. + // Wait a random 15-60 s between events. let wait_secs = 15 + xorshift(&mut rng) % 46; tokio::time::sleep(Duration::from_secs(wait_secs)).await; @@ -31,7 +31,7 @@ async fn run(state: AppState) { }; let unit = &units[xorshift(&mut rng) as usize % units.len()]; - // Only target units with auto control running 鈥?otherwise the event is uninteresting. + // Only target units with auto control running; otherwise the event is uninteresting. let runtime = state.control_runtime.get(unit.id).await; if runtime.map_or(true, |r| !r.auto_enabled) { continue; @@ -68,11 +68,11 @@ async fn run(state: AppState) { .find(|p| p.signal_role == target_role) .unwrap(); - // rem=false 鈫? not in remote mode (blocks commands) - // flt=true 鈫? fault signal active (triggers fault lock) + // rem=false means the equipment is not in remote mode. + // flt=true means the equipment reports an active fault. 
let trigger_value = target_role == "flt"; - // Hold duration: 5鈥?5 s for rem, 3鈥?0 s for flt. + // Hold duration: 5-15 s for rem, 3-10 s for flt. let hold_secs = if target_role == "flt" { 3 + xorshift(&mut rng) % 8 } else { @@ -80,20 +80,20 @@ async fn run(state: AppState) { }; tracing::info!( - "[chaos] unit={} eq={} role={} 鈫?{} (hold {}s)", + "[chaos] unit={} eq={} role={} -> {} (hold {}s)", unit.code, eq.code, target_role, if trigger_value { "FAULT" } else { "REM OFF" }, hold_secs ); - + patch_signal(&state, target_point.point_id, trigger_value).await; tokio::time::sleep(Duration::from_secs(hold_secs)).await; patch_signal(&state, target_point.point_id, !trigger_value).await; tracing::info!( - "[chaos] unit={} eq={} role={} 鈫?RESTORED", + "[chaos] unit={} eq={} role={} -> RESTORED", unit.code, eq.code, target_role @@ -196,7 +196,7 @@ pub async fn patch_signal(state: &AppState, point_id: Uuid, value_on: bool) { .await; } -// 鈹€鈹€ Minimal XorShift64 PRNG (no external crate needed) 鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€鈹€ +// Minimal XorShift64 PRNG (no external crate needed). fn seed_rng() -> u64 { std::time::SystemTime::now() diff --git a/crates/app_feeder_distributor/src/control/validator.rs b/crates/app_feeder_distributor/src/control/validator.rs index 73eab2b..0767bc4 100644 --- a/crates/app_feeder_distributor/src/control/validator.rs +++ b/crates/app_feeder_distributor/src/control/validator.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::HashMap; use serde_json::json; use uuid::Uuid; @@ -117,7 +117,7 @@ pub async fn validate_manual_control( drop(monitor_guard); - // Runtime state checks 鈥?block commands if unit is locked + // Runtime state checks; block commands if the unit is locked. 
if let Some(unit_id) = equipment.unit_id { if let Some(runtime) = state.control_runtime.get(unit_id).await { if runtime.auto_enabled { diff --git a/crates/app_feeder_distributor/src/event.rs b/crates/app_feeder_distributor/src/event.rs index c663548..7f68d22 100644 --- a/crates/app_feeder_distributor/src/event.rs +++ b/crates/app_feeder_distributor/src/event.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::HashMap; use plc_platform_core::event::EventEnvelope; use tokio::sync::mpsc; use uuid::Uuid; @@ -265,7 +265,7 @@ async fn handle_control_event( tracing::info!("REM recovered for unit {}", unit_id); } AppEvent::UnitStateChanged { unit_id, from_state, to_state } => { - tracing::info!("Unit {} state: {} 鈫?{}", unit_id, from_state, to_state); + tracing::info!("Unit {} state: {} -> {}", unit_id, from_state, to_state); } AppEvent::PointNewValue(_) => { tracing::warn!("PointNewValue routed to control worker unexpectedly"); @@ -508,7 +508,7 @@ async fn process_point_new_value( .and_then(|s| s.client_handle_map.get(&client_handle).copied()) }; if let Some(point_id) = point_id { - // 浠庣紦瀛樹腑璇诲彇鏃у€? + // Read the previous value from the in-memory cache. let (old_value, old_timestamp, value_changed) = { let monitor_data = connection_manager.get_point_monitor_data_read_guard().await; let old_monitor_info = monitor_data.get(&point_id); diff --git a/crates/app_feeder_distributor/src/handler/point.rs b/crates/app_feeder_distributor/src/handler/point.rs index 2eb5194..0f9b39e 100644 --- a/crates/app_feeder_distributor/src/handler/point.rs +++ b/crates/app_feeder_distributor/src/handler/point.rs @@ -1,4 +1,4 @@ -use axum::{ +use axum::{ extract::{Path, Query, State}, http::HeaderMap, response::IntoResponse, @@ -71,10 +71,10 @@ pub async fn get_point_list( query.validate()?; let pool = &state.pool; - // 鑾峰彇鎬绘暟 + // Count total rows. 
let total = crate::service::get_points_count(pool, query.source_id, query.equipment_id).await?; - // 鑾峰彇鍒嗛〉鏁版嵁 + // Load current page rows. let points = crate::service::get_points_paginated( pool, query.source_id, diff --git a/crates/app_feeder_distributor/src/handler/tag.rs b/crates/app_feeder_distributor/src/handler/tag.rs index 4d30453..dcc28fc 100644 --- a/crates/app_feeder_distributor/src/handler/tag.rs +++ b/crates/app_feeder_distributor/src/handler/tag.rs @@ -1,4 +1,4 @@ -use axum::{Json, extract::{Path, Query, State}, http::StatusCode, response::IntoResponse}; +use axum::{Json, extract::{Path, Query, State}, http::StatusCode, response::IntoResponse}; use serde::Deserialize; use uuid::Uuid; use validator::Validate; @@ -9,7 +9,7 @@ use plc_platform_core::util::{ }; use crate::{AppState}; -/// 鑾峰彇鎵€鏈夋爣绛? +/// List all tags. #[derive(Deserialize, Validate)] pub struct GetTagListQuery { #[serde(flatten)] @@ -23,10 +23,10 @@ pub async fn get_tag_list( query.validate()?; let pool = &state.pool; - // 鑾峰彇鎬绘暟 + // Count total rows. let total = crate::service::get_tags_count(pool).await?; - // 鑾峰彇鍒嗛〉鏁版嵁 + // Load current page rows. let tags = crate::service::get_tags_paginated( pool, query.pagination.page_size, @@ -38,7 +38,7 @@ pub async fn get_tag_list( Ok(Json(response)) } -/// 鑾峰彇鏍囩涓嬬殑鐐逛綅淇℃伅 +/// List points under a tag. pub async fn get_tag_points( State(state): State, Path(tag_id): Path, @@ -63,7 +63,7 @@ pub struct UpdateTagReq { pub point_ids: Option>, } -/// 鍒涘缓鏍囩 +/// Create a tag. pub async fn create_tag( State(state): State, Json(payload): Json, @@ -84,7 +84,7 @@ pub async fn create_tag( })))) } -/// 鏇存柊鏍囩 +/// Update a tag. pub async fn update_tag( State(state): State, Path(tag_id): Path, @@ -92,7 +92,7 @@ pub async fn update_tag( ) -> Result { payload.validate()?; - // 妫€鏌ユ爣绛炬槸鍚﹀瓨鍦? + // Ensure the target tag exists. 
let exists = crate::service::get_tag_by_id(&state.pool, tag_id).await?; if exists.is_none() { return Err(ApiErr::NotFound("Tag not found".to_string(), None)); @@ -111,7 +111,7 @@ pub async fn update_tag( }))) } -/// 鍒犻櫎鏍囩 +/// Delete a tag. pub async fn delete_tag( State(state): State, Path(tag_id): Path, diff --git a/crates/app_feeder_distributor/src/middleware.rs b/crates/app_feeder_distributor/src/middleware.rs index 5686f0d..a06052d 100644 --- a/crates/app_feeder_distributor/src/middleware.rs +++ b/crates/app_feeder_distributor/src/middleware.rs @@ -1,4 +1,4 @@ -use axum::{ +use axum::{ body::Body, http::Request, middleware::Next, @@ -10,9 +10,9 @@ pub async fn simple_logger( req: Request, next: Next, ) -> Response { - // 鐩存帴鑾峰彇瀛楃涓插紩鐢紝涓嶇敤鍏嬮殕 + // Borrow the path string directly; no clone needed. let method = req.method().to_string(); - let uri = req.uri().to_string(); // Uri 鐨?to_string() 鍒涘缓鏂板瓧绗︿覆 + let uri = req.uri().to_string(); // `Uri::to_string()` allocates the owned string once. let start = Instant::now(); let res = next.run(req).await; diff --git a/crates/plc_platform_core/src/util/pagination.rs b/crates/plc_platform_core/src/util/pagination.rs index 158c7c0..bbf28b5 100644 --- a/crates/plc_platform_core/src/util/pagination.rs +++ b/crates/plc_platform_core/src/util/pagination.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use validator::Validate; -/// 鍒嗛〉鍝嶅簲缁撴瀯 +/// Paginated API response payload. #[derive(Serialize)] pub struct PaginatedResponse { pub data: Vec, @@ -13,7 +13,7 @@ pub struct PaginatedResponse { } impl PaginatedResponse { - /// 鍒涘缓鍒嗛〉鍝嶅簲 + /// Build a paginated response from rows and total count. pub fn new(data: Vec, total: i64, page: u32, page_size: i32) -> Self { let total_pages = if page_size > 0 { ((total as f64) / (page_size as f64)).ceil() as u32 @@ -31,7 +31,7 @@ impl PaginatedResponse { } } -/// 鍒嗛〉鏌ヨ鍙傛暟 +/// Common pagination query parameters. 
#[serde_as] #[derive(Debug, Deserialize, Validate)] pub struct PaginationParams { @@ -54,7 +54,7 @@ fn default_page_size() -> i32 { } impl PaginationParams { - /// 璁$畻鍋忕Щ閲? + /// Row offset derived from the current page and page size. pub fn offset(&self) -> u32 { (self.page - 1) * self.page_size.max(0) as u32 } } diff --git a/crates/plc_platform_core/src/util/validator.rs b/crates/plc_platform_core/src/util/validator.rs index 0e210b3..24d4ce8 100644 --- a/crates/plc_platform_core/src/util/validator.rs +++ b/crates/plc_platform_core/src/util/validator.rs @@ -4,9 +4,9 @@ use validator::ValidationErrors; impl From for ApiErr { fn from(errors: ValidationErrors) -> Self { - // 鏋勫缓璇︾粏鐨勯敊璇俊鎭? + // Build a detailed validation error message. let mut error_details = serde_json::Map::new(); - let mut first_error_msg = String::from("璇锋眰鍙傛暟楠岃瘉澶辫触"); + let mut first_error_msg = String::from("请求参数验证失败"); for (field, field_errors) in errors.field_errors() { let error_list: Vec = field_errors @@ -19,8 +19,8 @@ impl From for ApiErr { .collect(); error_details.insert(field.to_string(), json!(error_list)); - // 鑾峰彇绗竴涓瓧娈电殑绗竴涓敊璇俊鎭? - if first_error_msg == "璇锋眰鍙傛暟楠岃瘉澶辫触" && !error_list.is_empty() { + // Use the first available field error as the summary. + if first_error_msg == "请求参数验证失败" && !error_list.is_empty() { if let Some(msg) = field_errors[0].message.as_ref() { first_error_msg = format!("{}: {}", field, msg); } else { diff --git a/src/control/engine.rs b/src/control/engine.rs index 883e78a..589ff11 100644 --- a/src/control/engine.rs +++ b/src/control/engine.rs @@ -52,12 +52,12 @@ async fn supervise(state: AppState, store: Arc) { } } -// ── Per-unit task ───────────────────────────────────────────────────────────── +// Per-unit task. async fn unit_task(state: AppState, store: Arc, unit_id: Uuid) { let notify = store.get_or_create_notify(unit_id).await; - // Fault/comm check ticker — still need periodic polling of point monitor data. 
+ // Fault/comm check ticker; still need periodic polling of point monitor data. let mut fault_tick = tokio::time::interval(Duration::from_millis(500)); fault_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); @@ -76,7 +76,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu } }; - // ── Fault / comm check ──────────────────────────────────────────────── + // Fault / comm check. let (kind_roles, kind_eq_ids, all_roles) = match load_equipment_maps(&state, unit_id).await { Ok(maps) => maps, Err(e) => { @@ -92,7 +92,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu push_ws(&state, &runtime).await; } - // ── Wait when not active ────────────────────────────────────────────── + // Wait when not active. if !runtime.auto_enabled || runtime.fault_locked || runtime.comm_locked || runtime.manual_ack_required { tokio::select! { _ = fault_tick.tick() => {} @@ -106,7 +106,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu continue; } - // ── State machine step ──────────────────────────────────────────────── + // State machine step. match runtime.state { UnitRuntimeState::Stopped => { // Wait stop_time_sec (0 = skip wait, start immediately). @@ -165,7 +165,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu runtime.display_acc_sec = runtime.accumulated_run_sec; if unit.acc_time_sec > 0 && runtime.accumulated_run_sec >= unit.acc_time_sec as i64 * 1000 { - // Accumulated threshold reached — start distributor. + // Accumulated threshold reached; start distributor. let monitor = state.connection_manager.get_point_monitor_data_read_guard().await; let dist_cmd = kind_roles.get("distributor").and_then(|r| find_cmd(r, "start_cmd", &monitor)); drop(monitor); @@ -223,7 +223,7 @@ async fn unit_task(state: AppState, store: Arc, unit_id: Uu } } -// ── Helpers ─────────────────────────────────────────────────────────────────── +// Helpers. 
/// Sleep for the duration appropriate to the *current* state, interrupting every /// 500 ms to re-check fault/comm. Returns `true` when the full time elapsed, diff --git a/src/control/simulate.rs b/src/control/simulate.rs index 61129c3..11462a3 100644 --- a/src/control/simulate.rs +++ b/src/control/simulate.rs @@ -20,7 +20,7 @@ async fn run(state: AppState) { let mut rng = seed_rng(); loop { - // Wait a random 15–60 s between events. + // Wait a random 15-60 s between events. let wait_secs = 15 + xorshift(&mut rng) % 46; tokio::time::sleep(Duration::from_secs(wait_secs)).await; @@ -31,7 +31,7 @@ async fn run(state: AppState) { }; let unit = &units[xorshift(&mut rng) as usize % units.len()]; - // Only target units with auto control running — otherwise the event is uninteresting. + // Only target units with auto control running; otherwise the event is uninteresting. let runtime = state.control_runtime.get(unit.id).await; if runtime.map_or(true, |r| !r.auto_enabled) { continue; @@ -68,11 +68,11 @@ async fn run(state: AppState) { .find(|p| p.signal_role == target_role) .unwrap(); - // rem=false → not in remote mode (blocks commands) - // flt=true → fault signal active (triggers fault lock) + // rem=false means the equipment is not in remote mode. + // flt=true means the equipment reports an active fault. let trigger_value = target_role == "flt"; - // Hold duration: 5–15 s for rem, 3–10 s for flt. + // Hold duration: 5-15 s for rem, 3-10 s for flt. 
let hold_secs = if target_role == "flt" { 3 + xorshift(&mut rng) % 8 } else { @@ -80,20 +80,20 @@ async fn run(state: AppState) { }; tracing::info!( - "[chaos] unit={} eq={} role={} → {} (hold {}s)", + "[chaos] unit={} eq={} role={} -> {} (hold {}s)", unit.code, eq.code, target_role, if trigger_value { "FAULT" } else { "REM OFF" }, hold_secs ); - + patch_signal(&state, target_point.point_id, trigger_value).await; tokio::time::sleep(Duration::from_secs(hold_secs)).await; patch_signal(&state, target_point.point_id, !trigger_value).await; tracing::info!( - "[chaos] unit={} eq={} role={} → RESTORED", + "[chaos] unit={} eq={} role={} -> RESTORED", unit.code, eq.code, target_role @@ -196,7 +196,7 @@ pub async fn patch_signal(state: &AppState, point_id: Uuid, value_on: bool) { .await; } -// ── Minimal XorShift64 PRNG (no external crate needed) ──────────────────────── +// Minimal XorShift64 PRNG (no external crate needed). fn seed_rng() -> u64 { std::time::SystemTime::now() diff --git a/src/handler/point.rs b/src/handler/point.rs index e38870c..0f9b39e 100644 --- a/src/handler/point.rs +++ b/src/handler/point.rs @@ -71,10 +71,10 @@ pub async fn get_point_list( query.validate()?; let pool = &state.pool; - // 获取总数 + // Count total rows. let total = crate::service::get_points_count(pool, query.source_id, query.equipment_id).await?; - // 获取分页数据 + // Load current page rows. let points = crate::service::get_points_paginated( pool, query.source_id, diff --git a/src/handler/tag.rs b/src/handler/tag.rs index 168f945..dcc28fc 100644 --- a/src/handler/tag.rs +++ b/src/handler/tag.rs @@ -9,7 +9,7 @@ use plc_platform_core::util::{ }; use crate::{AppState}; -/// 获取所有标签 +/// List all tags. #[derive(Deserialize, Validate)] pub struct GetTagListQuery { #[serde(flatten)] @@ -23,10 +23,10 @@ pub async fn get_tag_list( query.validate()?; let pool = &state.pool; - // 获取总数 + // Count total rows. 
let total = crate::service::get_tags_count(pool).await?; - // 获取分页数据 + // Load current page rows. let tags = crate::service::get_tags_paginated( pool, query.pagination.page_size, @@ -38,7 +38,7 @@ pub async fn get_tag_list( Ok(Json(response)) } -/// 获取标签下的点位信息 +/// List points under a tag. pub async fn get_tag_points( State(state): State, Path(tag_id): Path, @@ -63,7 +63,7 @@ pub struct UpdateTagReq { pub point_ids: Option>, } -/// 创建标签 +/// Create a tag. pub async fn create_tag( State(state): State, Json(payload): Json, @@ -84,7 +84,7 @@ pub async fn create_tag( })))) } -/// 更新标签 +/// Update a tag. pub async fn update_tag( State(state): State, Path(tag_id): Path, @@ -92,7 +92,7 @@ pub async fn update_tag( ) -> Result { payload.validate()?; - // 检查标签是否存在 + // Ensure the target tag exists. let exists = crate::service::get_tag_by_id(&state.pool, tag_id).await?; if exists.is_none() { return Err(ApiErr::NotFound("Tag not found".to_string(), None)); @@ -111,7 +111,7 @@ pub async fn update_tag( }))) } -/// 删除标签 +/// Delete a tag. pub async fn delete_tag( State(state): State, Path(tag_id): Path, diff --git a/src/middleware.rs b/src/middleware.rs index ebc4b84..a06052d 100644 --- a/src/middleware.rs +++ b/src/middleware.rs @@ -10,9 +10,9 @@ pub async fn simple_logger( req: Request, next: Next, ) -> Response { - // 直接获取字符串引用,不用克隆 + // Borrow the path string directly; no clone needed. let method = req.method().to_string(); - let uri = req.uri().to_string(); // Uri 的 to_string() 创建新字符串 + let uri = req.uri().to_string(); // `Uri::to_string()` allocates the owned string once. let start = Instant::now(); let res = next.run(req).await;