feat: 可获取总数据量

This commit is contained in:
caoqianming 2026-02-04 14:11:34 +08:00
commit d4e616f83c
4 changed files with 3816 additions and 0 deletions

3
.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
/target
logs/*
.env

3652
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

18
Cargo.toml Executable file
View File

@ -0,0 +1,18 @@
[package]
name = "hfnf_api"
version = "0.1.0"
edition = "2021"
[dependencies]
# Web framework (request logging + CORS middleware enabled).
salvo = { version = "0.68", features = ["logging", "cors"]}
# Async runtime driving salvo and sqlx.
tokio = { version = "1", features = ["macros"] }
# Structured logging facade + subscriber implementation.
tracing = "0.1"
tracing-subscriber = "0.3"
# Async Postgres driver; chrono feature maps timestamp columns to chrono types.
sqlx = { version = "0.7", features = [ "runtime-tokio-native-tls" , "postgres", "chrono" ] }
# (De)serialization for request/response JSON bodies.
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.117"
# One-time-initialized global (holds the DB pool).
once_cell = "1.19.0"
# Timestamp handling; serde feature for (de)serializing datetimes.
chrono = { version = "0.4", features = ["serde"] }
serde_with = "3.8.1"
# Loads DB_URL etc. from a local .env file.
dotenv = "0.15.0"
# Daily-rolling file appender for the WARN-level log layer.
tracing-appender = "0.2.3"

143
src/main.rs Executable file
View File

@ -0,0 +1,143 @@
use chrono::{DateTime, Utc, FixedOffset};
use once_cell::sync::OnceCell;
use salvo::logging::Logger;
use salvo::prelude::*;
use serde::{Deserialize, Serialize, Serializer};
use serde_json::json;
use sqlx::{FromRow, PgPool};
use std::env;
use dotenv::dotenv;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::{
fmt::{self},
layer::SubscriberExt,
util::SubscriberInitExt,
Layer,
};
use salvo::cors::Cors;
use salvo::http::Method;
// Process-wide Postgres connection pool; set exactly once in `main` before the
// server starts accepting requests, then read by handlers via `get_postgres`.
static POSTGRES: OnceCell<PgPool> = OnceCell::new();
/// Serde `serialize_with` helper: renders a UTC timestamp as a
/// `"%Y-%m-%d %H:%M:%S"` string in China Standard Time (UTC+8).
pub fn format_time_8<S>(time: &DateTime<Utc>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    // +08:00 is a valid fixed offset, so `east_opt` always returns Some here.
    let utc_plus_8 = FixedOffset::east_opt(8 * 3600).unwrap();
    let local_time = time.with_timezone(&utc_plus_8);
    let rendered = local_time.format("%Y-%m-%d %H:%M:%S").to_string();
    serializer.serialize_str(&rendered)
}
/// One row of the `mplogx` table: a timestamped reading from a measurement point.
#[derive(FromRow, Serialize, Debug, Deserialize)]
pub struct Mplogx {
    // Reading timestamp; serialized to the client as "%Y-%m-%d %H:%M:%S" in UTC+8.
    #[serde(serialize_with = "format_time_8")]
    pub timex: DateTime<Utc>,
    // Identifier of the measurement point this reading belongs to.
    pub mpoint_id: String,
    // Numeric and string payloads; either may be NULL in the table.
    pub val_float: Option<f64>,
    pub val_str: Option<String>
}
/// JSON request body for `POST /mplogx`: an optional point filter plus
/// 1-based pagination parameters.
#[derive(Deserialize)]
pub struct MplogxQuery {
    // When present, restrict the listing (and exact count) to this point.
    pub mpoint_id: Option<String>,
    // 1-based page number; handler defaults it to 1 when omitted.
    pub page: Option<i32>,
    // Rows per page; handler defaults it to 20 when omitted.
    pub page_size: Option<i32>
}
/// Returns the process-wide Postgres pool.
///
/// # Panics
/// Panics if called before `main` has stored the pool in `POSTGRES`;
/// the message names the invariant instead of a bare `unwrap` backtrace.
#[inline]
fn get_postgres() -> &'static PgPool {
    POSTGRES
        .get()
        .expect("POSTGRES pool must be initialized in main before serving requests")
}
#[handler]
async fn get_mplogx(req: &mut Request, res: &mut Response) {
// 解析查询参数
let query: MplogxQuery = req.parse_json().await.unwrap();
let page = query.page.unwrap_or(1).max(1);
let page_size = query.page_size.unwrap_or(20).max(1);
let offset = (page - 1) * page_size;
// 动态构建SQL区分有无mpoint_id条件
let (count_sql, data_sql) = match &query.mpoint_id {
Some(_) => (
"SELECT COUNT(*) FROM mplogx WHERE mpoint_id = $1".to_string(),
"SELECT * FROM mplogx WHERE mpoint_id = $1 ORDER BY timex DESC LIMIT $2 OFFSET $3".to_string()
),
None => (
"SELECT approximate_row_count('mplogx');".to_string(), // TimescaleDB快速计数
"SELECT * FROM mplogx ORDER BY timex DESC LIMIT $1 OFFSET $2".to_string()
)
};
// 查询总条数(区分精确计数和近似计数)
let total_count: i64 = match &query.mpoint_id {
Some(mpoint_id) => {
println!("xx");
sqlx::query_scalar(&count_sql)
.bind(mpoint_id)
.fetch_one(get_postgres())
.await
.unwrap_or(0)
},
None => {
sqlx::query_scalar(&count_sql)
.fetch_one(get_postgres())
.await
.unwrap_or(0)
}
};
// 查询分页数据
let data = match &query.mpoint_id {
Some(mpoint_id) => {
sqlx::query_as::<_, Mplogx>(&data_sql)
.bind(mpoint_id)
.bind(page_size)
.bind(offset)
.fetch_all(get_postgres())
.await
.unwrap_or_default()
},
None => {
sqlx::query_as::<_, Mplogx>(&data_sql)
.bind(page_size)
.bind(offset)
.fetch_all(get_postgres())
.await
.unwrap_or_default()
}
};
// 返回分页结果
let result = json!({
"count": total_count,
"results": data,
"page": page,
"page_size": page_size
});
res.render(serde_json::to_string(&result).unwrap());
}
/// Application entry point: loads `.env`, wires console + daily-rolling file
/// logging, connects to Postgres, and serves the API on 0.0.0.0:5800.
#[tokio::main]
async fn main() {
    // Pull DB_URL and friends from a local .env file when present.
    dotenv().ok();

    // Console layer logs everything; the file layer keeps WARN and above,
    // rotated daily under ./logs. `_guard` must stay alive for the whole
    // program so buffered log lines are flushed.
    let console = fmt::Layer::new().with_ansi(false).pretty();
    let file_appender = tracing_appender::rolling::daily("./logs", "api_log");
    let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
    let file = fmt::Layer::new()
        .with_ansi(false)
        .pretty()
        .with_writer(non_blocking)
        .with_filter(LevelFilter::WARN);
    tracing_subscriber::registry().with(console).with(file).init();

    // Fail fast at startup with actionable messages instead of bare unwraps.
    let db_url = env::var("DB_URL").expect("DB_URL must be set");
    let pool = PgPool::connect(&db_url)
        .await
        .expect("failed to connect to Postgres (check DB_URL)");
    POSTGRES
        .set(pool)
        .expect("POSTGRES pool was already initialized");

    // Permissive CORS so browser clients on any origin can call the API.
    let cors_handler = Cors::new()
        .allow_origin("*")
        .allow_methods(vec![Method::GET, Method::POST, Method::OPTIONS, Method::PUT, Method::DELETE])
        .allow_headers(vec!["Content-Type", "Authorization"])
        .into_handler();

    let router = Router::with_path("mplogx").post(get_mplogx);
    let service = Service::new(router).hoop(cors_handler).hoop(Logger::new());
    let acceptor = TcpListener::new("0.0.0.0:5800").bind().await;
    Server::new(acceptor).serve(service).await;
}