This commit is contained in:
zty 2025-06-20 09:44:57 +08:00
commit 6a5ec34ef8
29 changed files with 680 additions and 129 deletions

View File

@ -5,7 +5,7 @@ from apps.bi.models import Dataset
import concurrent
from apps.utils.sql import execute_raw_sql, format_sqldata
forbidden_keywords = ["UPDATE", "DELETE", "DROP", "TRUNCATE"]
forbidden_keywords = ["UPDATE", "DELETE", "DROP", "TRUNCATE", "INSERT", "CREATE", "ALTER", "GRANT", "REVOKE", "EXEC", "EXECUTE"]
def check_sql_safe(sql: str):

View File

@ -4,9 +4,49 @@ import json
import time
from django.core.cache import cache
from apps.utils.thread import MyThread
import struct
import uuid
import logging
def get_tyy_data_t(host, port):
cd_thread_key = f"cd_thread_{host}_{port}"
myLogger = logging.getLogger('log')
def get_checksum(body_msg):
    """Return the low 8 bits of the byte-wise sum of *body_msg*."""
    total = 0
    for byte in body_msg:
        total += byte
    return total % 256


def handle_bytes(arr):
    """Decode one collector frame and return the first element of its JSON body.

    Expected frame layout (indices into *arr*):
        [0:2]   header magic 0xEB 0x90
        [2:4]   length field (the two bytes are reversed, then read
                little-endian — effectively a big-endian read)
        [4:]    JSON body of (length - 3) bytes, then one checksum byte,
                then tail magic 0xFF 0xFE
    On any malformed frame a Chinese error description (str) is returned
    instead of the decoded value.
    """
    if len(arr) < 8:  # too short to hold even an empty frame
        return "返回数据长度错误"
    if not (arr[0] == 0xEB and arr[1] == 0x90):
        return "数据头不正确"
    # Reverse the two length bytes, then decode as little-endian.
    length = int.from_bytes(arr[2:4][::-1], byteorder='little', signed=True)
    body = arr[4:4 + length - 3]
    if get_checksum(body) != arr[length + 1]:
        return "校验错误"
    if not (arr[length + 2] == 0xFF and arr[length + 3] == 0xFE):
        return "尾错误"
    return json.loads(body.decode('utf-8'))[0]
def get_tyy_data_t(host, port, tid):
cd_thread_key_id = f"cd_thread_{host}_{port}_id"
cd_thread_key_val = f"cd_thread_{host}_{port}_val"
sc = None
def connect_and_send(retry=1):
nonlocal sc
@ -16,71 +56,141 @@ def get_tyy_data_t(host, port):
sc.connect((host, int(port)))
sc.sendall(b"R")
except BrokenPipeError:
sc = None
if retry > 0:
connect_and_send(retry-1)
else:
if sc:
try:
sc.close()
except Exception:
pass
sc = None
except OSError as e:
sc = None
if retry > 0:
connect_and_send(retry-1)
else:
cache.set(cd_thread_key, {"err_msg": f"采集器连接失败-{str(e)}"})
cache.set(cd_thread_key_val, {"err_msg": f"采集器连接失败-{str(e)}"})
except ConnectionResetError:
sc = None
if retry > 0:
connect_and_send(retry-1)
else:
cache.set(cd_thread_key, {"err_msg": "采集器重置了连接"})
cache.set(cd_thread_key_val, {"err_msg": "采集器重置了连接"})
except socket.timeout:
cache.set(cd_thread_key, {"err_msg": "采集器连接超时"})
sc = None
cache.set(cd_thread_key_val, {"err_msg": "采集器连接超时"})
except Exception as e:
sc = None
cache.set(cd_thread_key_val, {"err_msg": f"采集器连接失败-{str(e)}"})
while cache.get(cd_thread_key_id) == tid:
if cache.get(cd_thread_key_val) == "get":
cache.set(cd_thread_key_val, "working")
connect_and_send()
if sc is None:
continue
resp = sc.recv(1024)
res = handle_bytes(resp)
if isinstance(res, str):
cache.set(cd_thread_key_val, {"err_msg": f'采集器返回数据错误-{res}'})
elif not res:
cache.set(cd_thread_key_val, {"err_msg": f"采集器返回数据为空-{str(res)}"})
else:
myLogger.info(f"采集器返回数据-{res}")
cache.set(cd_thread_key_val, res)
time.sleep(0.3)
if sc:
try:
sc.close()
except Exception:
pass
def get_tyy_data_2(*args, retry=1):
host, port = args[0], int(args[1])
cd_thread_key_id = f"cd_thread_{host}_{port}_id"
cd_thread_key_val = f"cd_thread_{host}_{port}_val"
cd_thread_val_id = cache.get(cd_thread_key_id, default=None)
if cd_thread_val_id is None:
tid = uuid.uuid4()
cache.set(cd_thread_key_id, tid, timeout=10800)
cd_thread = MyThread(target=get_tyy_data_t, args=(host, port, tid), daemon=True)
cd_thread.start()
cache.set(cd_thread_key_val, "get")
num = 0
get_val = False
while True:
cd_thread_val = cache.get(cd_thread_key, default=None)
if cd_thread_val is None:
num += 1
if num > 8:
break
val = cache.get(cd_thread_key_val)
if isinstance(val, dict):
get_val = True
if "err_msg" in val:
raise ParseError(val["err_msg"])
return val
time.sleep(0.3)
if not get_val and retry > 0:
cache.set(cd_thread_key_id, None)
get_tyy_data_2(*args, retry=retry-1)
sc_all = {}
def get_tyy_data(*args):
host, port = args[0], int(args[1])
global sc_all
sc = None
def connect_and_send(retry=1):
nonlocal sc
sc = sc_all.get(f"{host}_{port}", None)
try:
if sc is None:
sc = socket.socket()
sc.settimeout(5) # 设置超时
sc.connect((host, port))
sc_all[f"{host}_{port}"] = sc
sc.sendall(b"R")
except BrokenPipeError:
if retry > 0:
if sc:
try:
sc.close()
except Exception:
pass
sc_all.pop(f"{host}_{port}", None)
return connect_and_send(retry-1)
else:
if sc:
try:
sc.close()
except Exception:
pass
sc_all.pop(f"{host}_{port}", None)
sc = None
raise ParseError("采集器连接失败-管道重置")
except OSError as e:
if sc:
try:
sc.close()
except Exception:
pass
break
elif cd_thread_val == "get":
connect_and_send()
if sc is None:
cache.set(cd_thread_key, {"err_msg": "采集器连接失败"})
continue
resp = sc.recv(1024)
if not resp:
cache.set(cd_thread_key, {"err_msg": f"采集器返回空数据-{str(resp)}"})
elif len(resp) < 8:
cache.set(cd_thread_key, {"err_msg": f"设备未启动-{str(resp)}"})
else:
json_data = resp[5:-4]
json_str = json_data.decode('utf-8')
res = json.loads(json_str)
cache.set(cd_thread_key, res)
time.sleep(0.3)
def get_tyy_data(*args):
host, port = args[0], int(args[1])
cd_thread_key = f"cd_thread_{host}_{port}"
cd_thread_val = cache.get(cd_thread_key, default=None)
if cd_thread_val is None:
cache.set(cd_thread_key, "start")
cd_thread = MyThread(target=get_tyy_data_t, args=(host, port), daemon=True)
cd_thread.start()
cache.set(cd_thread_key, "get")
num = 0
while True:
num += 1
if num > 8:
break
val = cache.get(cd_thread_key)
if isinstance(val, dict):
if "err_msg" in val:
raise ParseError(val["err_msg"])
return val
time.sleep(0.3)
raise ParseError("获取数据超时")
if __name__ == '__main__':
print(get_tyy_data())
sc_all.pop(f"{host}_{port}", None)
sc = None
raise ParseError(f"采集器连接失败-{str(e)}")
except TimeoutError as e:
if sc:
try:
sc.close()
except Exception:
pass
sc_all.pop(f"{host}_{port}", None)
sc = None
raise ParseError(f"采集器连接超时-{str(e)}")
connect_and_send()
resp = sc.recv(1024)
res = handle_bytes(resp)
# myLogger.error(res)
if isinstance(res, str):
raise ParseError(f'采集器返回数据错误-{res}')
else:
return res

View File

@ -0,0 +1,14 @@
# 角色
你是一位数据分析专家和前端程序员,具备深厚的专业知识和丰富的实践经验。你能够精准理解用户的文本描述, 并形成报告。
# 技能
1. 仔细分析用户提供的JSON格式数据分析用户需求。
2. 依据得到的需求, 分别获取JSON数据中的关键信息。
3. 根据2中的关键信息最优化选择表格/饼图/柱状图/折线图等格式绘制报告。
# 回答要求
1. 仅生成完整的HTML代码所有功能都需要实现支持响应式不要输出任何解释或说明。
2. 代码中如需要Echarts等js库请直接使用中国大陆的CDN链接例如bootcdn的链接。
3. 标题为 数据分析报告。
4. 在开始部分请以表格形式简略展示获取的JSON数据。
5. 之后选择最合适的图表方式生成相应的图。
6. 在最后提供可下载该报告的完整PDF的按钮和功能。
7. 在最后提供可下载含有JSON数据的EXCEL文件的按钮和功能。

View File

@ -0,0 +1,53 @@
# 角色
你是一位资深的Postgresql数据库SQL专家具备深厚的专业知识和丰富的实践经验。你能够精准理解用户的文本描述并生成准确可执行的SQL语句。
# 技能
1. 仔细分析用户提供的文本描述,明确用户需求。
2. 根据对用户需求的理解生成符合Postgresql数据库语法的准确可执行的SQL语句。
# 回答要求
1. 如果用户的询问未以 查询 开头,请直接回复 "请以 查询 开头,重新描述你的需求"。
2. 生成的SQL语句必须符合Postgresql数据库的语法规范。
3. 不要使用 Markdown 和 SQL 语法格式输出,禁止添加语法标准、备注、说明等信息。
4. 直接输出符合Postgresql标准的SQL语句用txt纯文本格式展示即可。
5. 如果无法生成符合要求的SQL语句请直接回复 "无法生成"。
# 示例
1. 问:查询 外协白片抛 工段在2025年6月1日到2025年6月15日之间的生产合格数以及合格率等
答: select
sum(mlog.count_use) as 领用数,
sum(mlog.count_real) as 生产数,
sum(mlog.count_ok) as 合格数,
sum(mlog.count_notok) as 不合格数,
CAST ( SUM ( mlog.count_ok ) AS FLOAT ) / NULLIF ( SUM ( mlog.count_real ), 0 ) * 100 AS 合格率
from wpm_mlog mlog
left join mtm_mgroup mgroup on mgroup.id = mlog.mgroup_id
where mlog.submit_time is not null
and mgroup.name = '外协白片抛'
and mlog.handle_date >= '2025-06-01'
and mlog.handle_date <= '2025-06-15'
2. 问:查询 黑化 工段在2025年6月的生产合格数以及合格率等
答: select
sum(mlog.count_use) as 领用数,
sum(mlog.count_real) as 生产数,
sum(mlog.count_ok) as 合格数,
sum(mlog.count_notok) as 不合格数,
CAST ( SUM ( mlog.count_ok ) AS FLOAT ) / NULLIF ( SUM ( mlog.count_real ), 0 ) * 100 AS 合格率
from wpm_mlog mlog
left join mtm_mgroup mgroup on mgroup.id = mlog.mgroup_id
where mlog.submit_time is not null
and mgroup.name = '黑化'
and mlog.handle_date >= '2025-06-01'
and mlog.handle_date <= '2025-06-30'
3. 问:查询 各工段 在2025年6月的生产合格数以及合格率等
答: select
mgroup.name as 工段,
sum(mlog.count_use) as 领用数,
sum(mlog.count_real) as 生产数,
sum(mlog.count_ok) as 合格数,
sum(mlog.count_notok) as 不合格数,
CAST ( SUM ( mlog.count_ok ) AS FLOAT ) / NULLIF ( SUM ( mlog.count_real ), 0 ) * 100 AS 合格率
from wpm_mlog mlog
left join mtm_mgroup mgroup on mgroup.id = mlog.mgroup_id
where mlog.submit_time is not null
and mlog.handle_date >= '2025-06-01'
and mlog.handle_date <= '2025-06-30'
group by mgroup.id
order by mgroup.sort

View File

@ -2,6 +2,7 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from apps.ichat.views import QueryLLMviewSet, ConversationViewSet
from apps.ichat.views2 import WorkChain
API_BASE_URL = 'api/ichat/'
@ -11,4 +12,5 @@ router.register('conversation', ConversationViewSet, basename='conversation')
router.register('message', QueryLLMviewSet, basename='message')
urlpatterns = [
path(API_BASE_URL, include(router.urls)),
path(API_BASE_URL + 'workchain/ask/', WorkChain.as_view(), name='workchain')
]

129
apps/ichat/views2.py Normal file
View File

@ -0,0 +1,129 @@
import requests
import os
from apps.utils.sql import execute_raw_sql
import json
from apps.utils.tools import MyJSONEncoder
from .utils import is_safe_sql
from rest_framework.views import APIView
from drf_yasg.utils import swagger_auto_schema
from rest_framework import serializers
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from django.conf import settings
from apps.utils.mixins import MyLoggingMixin
from django.core.cache import cache
import uuid
from apps.utils.thread import MyThread
LLM_URL = getattr(settings, "LLM_URL", "")
API_KEY = getattr(settings, "LLM_API_KEY", "")
MODEL = "qwen14b"
HEADERS = {
"Authorization": f"Bearer {API_KEY}",
"Content-Type": "application/json"
}
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def load_promot(name):
    """Read the prompt file ``promot/<name>.md`` next to this module and return its text.

    The encoding is pinned to UTF-8: the prompt files contain Chinese text,
    and relying on the platform default encoding (e.g. GBK/cp936 on Windows)
    would corrupt or fail to decode them.
    """
    with open(os.path.join(CUR_DIR, f'promot/{name}.md'), 'r', encoding='utf-8') as f:
        return f.read()
def ask(input: str, p_name: str, stream=False):
    """Send *input* to the LLM using the system prompt *p_name*; return the reply text.

    :param input: the user message to send.
    :param p_name: name of the prompt file loaded via ``load_promot``.
    :param stream: when True, consume an SSE stream and concatenate the
        delta fragments; otherwise parse a single JSON response.
    """
    his = [{"role": "system", "content": load_promot(p_name)}]
    his.append({"role": "user", "content": input})
    payload = {
        "model": MODEL,
        "messages": his,
        "temperature": 0,  # deterministic output for SQL / report generation
        "stream": stream
    }
    response = requests.post(LLM_URL, headers=HEADERS, json=payload, stream=stream)
    if not stream:
        return response.json()["choices"][0]["message"]["content"]
    # Streamed responses arrive as SSE lines: "data: {...}" terminated by
    # "data: [DONE]". Accumulate the delta fragments into one string.
    # (The old code also print()-ed every fragment — debug leftover, removed.)
    full_content = ""
    for chunk in response.iter_lines():
        if not chunk:
            continue
        decoded_chunk = chunk.decode('utf-8')
        if not decoded_chunk.startswith("data:"):
            continue
        json_str = decoded_chunk[5:].strip()
        if json_str == "[DONE]":
            break
        try:
            chunk_data = json.loads(json_str)
        except json.JSONDecodeError:
            # Skip malformed keep-alive / partial lines.
            continue
        if chunk_data.get("choices"):
            delta = chunk_data["choices"][0].get("delta", {})
            if "content" in delta:
                full_content += delta["content"]
    return full_content
def _strip_code_fence(text: str) -> str:
    """Strip a wrapping markdown code fence (```html ... ```) from *text*.

    ``str.lstrip``/``str.rstrip`` treat their argument as a *character set*,
    so the previous ``res2.lstrip('```html ')`` also ate leading 'h'/'t'/'m'/
    'l'/space characters belonging to the real content. Strip the fence as a
    literal prefix/suffix instead.
    """
    content = text.strip()
    if content.startswith("```html"):
        content = content[len("```html"):]
    elif content.startswith("```"):
        content = content[len("```"):]
    if content.endswith("```"):
        content = content[:-3]
    return content.strip()


def work_chain(input: str, t_key: str):
    """Background pipeline: natural-language question -> SQL -> execution -> HTML report.

    Progress is published under cache key *t_key* as a dict
    ``{"state": "progress"|"error"|"done", "steps": [...], "content": ...}``
    so the WorkChain GET endpoint can poll it. Returns None.
    """
    pdict = {"state": "progress", "steps": [{"state":"ok", "msg":"正在生成查询语句"}]}
    cache.set(t_key, pdict)
    res_text = ask(input, 'w_sql')
    # The w_sql prompt instructs the model to reply with this exact sentence
    # when the question does not start with 查询.
    if res_text == '请以 查询 开头,重新描述你的需求':
        pdict["state"] = "error"
        pdict["steps"].append({"state":"error", "msg":res_text})
        cache.set(t_key, pdict)
        return
    pdict["steps"].append({"state":"ok", "msg":"查询语句生成成功", "content":res_text})
    cache.set(t_key, pdict)
    if not is_safe_sql(res_text):
        pdict["state"] = "error"
        pdict["steps"].append({"state":"error", "msg":"当前查询存在风险,请重新描述你的需求"})
        cache.set(t_key, pdict)
        return
    pdict["steps"].append({"state":"ok", "msg":"正在执行查询语句"})
    cache.set(t_key, pdict)
    res = execute_raw_sql(res_text)
    pdict["steps"].append({"state":"ok", "msg":"查询语句执行成功", "content":res})
    cache.set(t_key, pdict)
    pdict["steps"].append({"state":"ok", "msg":"正在生成报告"})
    cache.set(t_key, pdict)
    res2 = ask(json.dumps(res, cls=MyJSONEncoder, ensure_ascii=False), 'w_ana')
    content = _strip_code_fence(res2)
    pdict["state"] = "done"
    pdict["content"] = content
    pdict["steps"].append({"state":"ok", "msg":"报告生成成功", "content": content})
    cache.set(t_key, pdict)
    return
class InputSerializer(serializers.Serializer):
    # Request body schema for WorkChain.post: a single free-text query field.
    input = serializers.CharField(label="查询需求")
class WorkChain(MyLoggingMixin, APIView):
    """Async LLM query workflow: POST submits a question, GET polls its progress."""

    @swagger_auto_schema(
        operation_summary="提交查询需求",
        request_body=InputSerializer)
    def post(self, request):
        """Start work_chain in a background thread and return its cache key."""
        llm_enabled = getattr(settings, "LLM_ENABLED", False)
        if not llm_enabled:
            raise ParseError('LLM功能未启用')
        input = request.data.get('input')
        if not input:
            # Do not spawn a worker thread for an empty request.
            raise ParseError('input不能为空')
        t_key = f'ichat_{uuid.uuid4()}'
        MyThread(target=work_chain, args=(input, t_key)).start()
        return Response({'ichat_tid': t_key})

    @swagger_auto_schema(
        operation_summary="获取查询进度")
    def get(self, request):
        """Return the cached progress dict for the given ichat_tid."""
        llm_enabled = getattr(settings, "LLM_ENABLED", False)
        if not llm_enabled:
            raise ParseError('LLM功能未启用')
        ichat_tid = request.GET.get('ichat_tid')
        if not ichat_tid:
            # Previously the view fell through and returned None, which DRF
            # turns into an HTTP 500; fail with an explicit 400 instead.
            raise ParseError('缺少ichat_tid参数')
        return Response(cache.get(ichat_tid))
if __name__ == "__main__":
    # Manual smoke test (requires Django settings/cache to be configured).
    # work_chain takes (input, t_key); the old call passed only the question,
    # which raised TypeError, and a duplicate re-import/call followed it.
    smoke_key = f'ichat_{uuid.uuid4()}'
    work_chain("查询 一次超洗 工段在2025年6月的生产合格数等并形成报告", smoke_key)
    print(cache.get(smoke_key))

View File

@ -35,6 +35,7 @@ class MioFilter(filters.FilterSet):
"order": ["exact"],
"item_mio__test_date": ["isnull"],
"item_mio__test_user": ["isnull"],
"item_mio__w_mioitem__number": ["exact"],
"mgroup": ["exact"],
"item_mio__batch": ["exact"]
}

View File

@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2025-06-19 02:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: add the optional unit_price (单价) column —
    # DecimalField(max_digits=14, decimal_places=2) — to the MIOItem model.

    dependencies = [
        ('inm', '0030_auto_20250523_0922'),
    ]

    operations = [
        migrations.AddField(
            model_name='mioitem',
            name='unit_price',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=14, null=True, verbose_name='单价'),
        ),
    ]

View File

@ -139,6 +139,7 @@ class MIOItem(BaseModel):
material = models.ForeignKey(
Material, verbose_name='物料', on_delete=models.CASCADE)
batch = models.TextField('批次号', db_index=True)
unit_price = models.DecimalField('单价', max_digits=14, decimal_places=2, null=True, blank=True)
count = models.DecimalField('出入数量', max_digits=12, decimal_places=3)
count_tested = models.PositiveIntegerField('已检数', null=True, blank=True)
test_date = models.DateField('检验日期', null=True, blank=True)

View File

@ -12,6 +12,7 @@ from .models import MIO, MaterialBatch, MIOItem, WareHouse, MIOItemA, MaterialBa
from django.db import transaction
from server.settings import get_sysconfig
from apps.wpmw.models import Wpr
from decimal import Decimal
class WareHourseSerializer(CustomModelSerializer):
@ -109,7 +110,7 @@ class MIOItemCreateSerializer(CustomModelSerializer):
class Meta:
model = MIOItem
fields = ['mio', 'warehouse', 'material',
'batch', 'count', 'assemb', 'is_testok', 'mioitemw', 'mb', 'wm']
'batch', 'count', 'assemb', 'is_testok', 'mioitemw', 'mb', 'wm', 'unit_price']
extra_kwargs = {
'mio': {'required': True}, 'warehouse': {'required': False},
'material': {'required': False}, 'batch': {'required': False}}
@ -207,8 +208,7 @@ class MIOItemAListSerializer(CustomModelSerializer):
class MIOItemSerializer(CustomModelSerializer):
warehouse_name = serializers.CharField(
source='warehouse.name', read_only=True)
warehouse_name = serializers.CharField(source='warehouse.name', read_only=True)
material_ = MaterialSerializer(source='material', read_only=True)
assemb = MIOItemAListSerializer(
source='a_mioitem', read_only=True, many=True)
@ -222,6 +222,15 @@ class MIOItemSerializer(CustomModelSerializer):
model = MIOItem
fields = '__all__'
def to_representation(self, instance):
ret = super().to_representation(instance)
ret["price"] = None
if ret["unit_price"] is not None:
ret["price"] = Decimal(ret["count"]) * Decimal(ret["unit_price"])
return ret
class MioItemDetailSerializer(MIOItemSerializer):
mio_ = MIOListSerializer(source='mio', read_only=True)
class MIODoSerializer(CustomModelSerializer):

View File

@ -14,7 +14,7 @@ from apps.inm.serializers import (
MaterialBatchSerializer, WareHourseSerializer, MIOListSerializer, MIOItemSerializer, MioItemAnaSerializer,
MIODoSerializer, MIOSaleSerializer, MIOPurSerializer, MIOOtherSerializer, MIOItemCreateSerializer,
MaterialBatchDetailSerializer, MIODetailSerializer, MIOItemTestSerializer, MIOItemPurInTestSerializer,
MIOItemwSerializer)
MIOItemwSerializer, MioItemDetailSerializer)
from apps.inm.serializers2 import MIOItemwCreateUpdateSerializer
from apps.utils.viewsets import CustomGenericViewSet, CustomModelViewSet
from apps.inm.services import InmService
@ -25,6 +25,8 @@ from apps.utils.permission import has_perm
from .filters import MaterialBatchFilter, MioFilter
from apps.qm.serializers import FtestProcessSerializer
from apps.mtm.models import Material
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
# Create your views here.
@ -243,6 +245,7 @@ class MIOItemViewSet(CustomListModelMixin, BulkCreateModelMixin, BulkDestroyMode
perms_map = {'get': '*', 'post': '*', 'delete': '*'}
queryset = MIOItem.objects.all()
serializer_class = MIOItemSerializer
retrieve_serializer_class = MioItemDetailSerializer
create_serializer_class = MIOItemCreateSerializer
select_related_fields = ['warehouse', 'mio', 'material', 'test_user']
filterset_fields = {
@ -258,9 +261,28 @@ class MIOItemViewSet(CustomListModelMixin, BulkCreateModelMixin, BulkDestroyMode
ordering_fields = ['create_time', 'test_date']
def add_info_for_list(self, data):
with_mio = self.request.query_params.get('with_mio', "no")
if with_mio == "yes" and isinstance(data, list):
mio_ids = [item['mio'] for item in data]
mio_qs = MIO.objects.filter(id__in=mio_ids)
mio_qs_= MIOListSerializer(mio_qs, many=True).data
mio_dict = {mio['id']: mio for mio in mio_qs_}
for item in data:
mioId = item['mio']
item['mio_'] = mio_dict[mioId]
return data
@swagger_auto_schema(manual_parameters=[
openapi.Parameter(name="with_mio", in_=openapi.IN_QUERY, description="是否返回出入库记录信息",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter(name="query", in_=openapi.IN_QUERY, description="定制返回数据",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter(name="with_children", in_=openapi.IN_QUERY, description="带有children(yes/no/count)",
type=openapi.TYPE_STRING, required=False)
])
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
def perform_destroy(self, instance):
if instance.mio.state != MIO.MIO_CREATE:
raise ParseError('出入库记录非创建中不可删除')

View File

@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2025-06-18 08:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: add the optional bin_number_main (主库位号) column —
    # CharField(max_length=50) — to the Material model.

    dependencies = [
        ('mtm', '0058_process_wpr_number_rule'),
    ]

    operations = [
        migrations.AddField(
            model_name='material',
            name='bin_number_main',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='主库位号'),
        ),
    ]

View File

@ -101,6 +101,7 @@ class Material(CommonAModel):
brothers = models.JSONField('兄弟件', default=list, null=False, blank=True)
unit_price = models.DecimalField('单价', max_digits=14, decimal_places=2, null=True, blank=True)
into_wm = models.BooleanField('是否进入车间库存', default=True)
bin_number_main = models.CharField('主库位号', max_length=50, null=True, blank=True)
class Meta:
verbose_name = '物料表'

View File

@ -24,7 +24,7 @@ class MaterialSimpleSerializer(CustomModelSerializer):
class Meta:
model = Material
fields = ['id', 'name', 'number', 'model',
'specification', 'type', 'cate', 'brothers', 'process_name', 'full_name', "tracking"]
'specification', 'type', 'cate', 'brothers', 'process_name', 'full_name', "tracking", "bin_number_main"]
def get_full_name(self, obj):
return f'{obj.name}|{obj.specification if obj.specification else ""}|{obj.model if obj.model else ""}|{obj.process.name if obj.process else ""}'

View File

@ -50,7 +50,7 @@ class MtaskFilter(filters.FilterSet):
"is_count_utask": ["exact"],
"start_date": ["exact", "gte", "lte"],
"end_date": ["exact", "gte", "lte"],
"mgroup": ["exact"],
"mgroup": ["exact", "in"],
"mgroup__name": ["exact"],
"mgroup__cate": ["exact"],
"mgroup__process": ["exact"],

View File

@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2025-06-11 03:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: add the priority (优先级) column to Utask —
    # PositiveIntegerField, default 20; values 10/20/30 = low/medium/high.

    dependencies = [
        ('pm', '0021_auto_20250317_1040'),
    ]

    operations = [
        migrations.AddField(
            model_name='utask',
            name='priority',
            field=models.PositiveIntegerField(default=20, help_text='10:低;20:中;30:高', verbose_name='优先级'),
        ),
    ]

View File

@ -37,6 +37,7 @@ class Utask(CommonBDModel):
type = models.CharField('任务类型', max_length=10,
help_text=str(TASK_TYPE), default='mass')
routepack = models.ForeignKey(RoutePack, verbose_name='关联工艺包', on_delete=models.SET_NULL, null=True, blank=True)
priority = models.PositiveIntegerField('优先级', default=20, help_text="10:低;20:中;30:高")
state = models.PositiveIntegerField(
'状态', choices=UTASK_STATES, default=UTASK_CREATED, help_text=str(UTASK_STATES))
number = models.CharField('编号', max_length=50, unique=True)

View File

@ -27,7 +27,8 @@ class UtaskSerializer(CustomModelSerializer):
model = Utask
fields = '__all__'
extra_kwargs = {
'number': {"required": False, "allow_blank": True}
'number': {"required": False, "allow_blank": True},
"priority": {"required": False, "allow_null": True},
}
@transaction.atomic
@ -52,6 +53,7 @@ class UtaskSerializer(CustomModelSerializer):
attrs['count_day'] = math.ceil(attrs['count']/rela_days)
except Exception:
raise ParseError('日均任务数计划失败')
attrs["priority"] = attrs.get("priority", 20)
return attrs
def update(self, instance, validated_data):

View File

@ -29,7 +29,8 @@ class UtaskViewSet(CustomModelViewSet):
serializer_class = UtaskSerializer
filterset_class = UtaskFilter
select_related_fields = ['material']
ordering = ['-start_date']
ordering_fields = ['priority', 'start_date']
ordering = ["priority", '-start_date']
def perform_destroy(self, instance):
if instance.state >= Utask.UTASK_WORKING:
@ -143,8 +144,8 @@ class MtaskViewSet(CustomModelViewSet):
filterset_class = MtaskFilter
select_related_fields = ['material_in', 'material_out', 'mgroup']
prefetch_related_fields = ['mlog_mtask', 'b_mtask']
ordering_fields = ['start_date', 'mgroup__process__sort', 'create_time']
ordering = ['-start_date', 'route__sort', 'mgroup__process__sort', '-create_time']
ordering_fields = ["utask__priority", 'start_date', 'mgroup__process__sort', 'create_time']
ordering = ["utask__priority", '-start_date', 'route__sort', 'mgroup__process__sort', '-create_time']
@action(methods=['post'], detail=False, perms_map={'post': '*'}, serializer_class=MtaskDaySerializer)
@transaction.atomic

View File

@ -110,7 +110,8 @@ class MlogFilter(filters.FilterSet):
"mgroup__name": ["exact", "in", "contains"],
"submit_time": ["isnull"],
"fmlog": ["exact"],
"b_mlog__batch": ["exact"]
"b_mlog__batch": ["exact"],
"b_mlog__w_mlogb__number": ["exact"],
}
def filter_cbatch(self, queryset, name, value):
@ -158,7 +159,8 @@ class HandoverFilter(filters.FilterSet):
"recive_mgroup__name": ["exact"],
"type": ["exact", "in"],
"mtype": ["exact", "in"],
"b_handover__batch": ["exact"]
"b_handover__batch": ["exact"],
"b_handover__w_handoverb__number": ["exact"]
}

View File

@ -0,0 +1,19 @@
# Generated by Django 3.2.12 on 2025-06-09 05:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Schema migration: redefine Handoverbw.handoverb to carry
    # related_name='w_handoverb' (used by reverse lookups in the filters).

    dependencies = [
        ('wpm', '0116_auto_20250523_0922'),
    ]

    operations = [
        migrations.AlterField(
            model_name='handoverbw',
            name='handoverb',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='w_handoverb', to='wpm.handoverb', verbose_name='关联交接记录'),
        ),
    ]

View File

@ -125,6 +125,10 @@ class WMaterial(CommonBDModel):
def count_working(self):
return Mlogb.objects.filter(wm_in=self, mlog__submit_time__isnull=True).aggregate(count=Sum('count_use'))['count'] or 0
@property
def count_handovering(self):
return Handoverb.objects.filter(wm=self, handover__submit_time__isnull=True).aggregate(count=Sum('count'))['count'] or 0
@classmethod
def mat_in_qs(cls, mtask: Mtask, qs=None):
"""
@ -316,7 +320,7 @@ class Mlog(CommonADModel):
通过mlogb计算mlog count 合计
"""
mlog = self
if mlog.fill_way == Mlog.MLOG_23:
if mlog.fill_way in [Mlog.MLOG_23, Mlog.MLOG_12]:
a_dict = {
"total_count_use": Sum('count_use'),
"total_count_break": Sum('count_break'),
@ -431,16 +435,16 @@ class Mlogb(BaseModel):
def mlogbdefect(self):
return MlogbDefect.objects.filter(mlogb=self)
def cal_count_pn_jgqbl(self):
def cal_count_pn_jgqbl(self, cal_mlog=False):
mqs = MlogbDefect.get_defect_qs_from_mlogb(self, ftype="in")
count_pn_jgqbl = mqs.aggregate(total=Sum("count"))["total"] or 0
self.count_pn_jgqbl = count_pn_jgqbl
self.save(update_fields=["count_pn_jgqbl"])
mlog = self.mlog
if mlog:
if mlog and cal_mlog:
mlog.cal_mlog_count_from_mlogb()
def cal_count_notok(self):
def cal_count_notok(self, cal_mlog=True):
mqs = MlogbDefect.get_defect_qs_from_mlogb(self, ftype="out")
count_notok = mqs.filter(defect__okcate=30).aggregate(total=Sum("count"))["total"] or 0
count_notok_full = mqs.exclude(defect__okcate=10).aggregate(total=Sum("count"))["total"] or 0
@ -448,10 +452,10 @@ class Mlogb(BaseModel):
self.count_ok = self.count_real - count_notok
self.count_ok_full = self.count_real - count_notok_full
if self.count_ok_full < 0:
raise ParseError("完全合格数不能小于0")
raise ParseError(f"完全合格数不能小于0:{self.count_real}-{self.count_ok}-{self.count_notok}-{self.count_ok_full}")
self.save(update_fields=["count_ok", "count_notok", "count_ok_full"])
mlog = self.mlog
if mlog:
if mlog and cal_mlog:
mlog.cal_mlog_count_from_mlogb()
class MlogbDefect(BaseModel):
@ -518,7 +522,7 @@ class Mlogbw(BaseModel):
mlogb.count_notok = count_notok
mlogb.count_ok = count - mlogb.count_notok
mlogb.save()
mlogb.cal_count_notok()
mlogb.cal_count_notok(cal_mlog=True)
class Handover(CommonADModel):
"""
@ -592,7 +596,7 @@ class Handoverb(BaseModel):
class Handoverbw(BaseModel):
"""TN: 单个产品交接记录
"""
handoverb = models.ForeignKey(Handoverb, verbose_name='关联交接记录', on_delete=models.CASCADE)
handoverb = models.ForeignKey(Handoverb, verbose_name='关联交接记录', on_delete=models.CASCADE, related_name="w_handoverb")
number = models.TextField('单个编号')
wpr = models.ForeignKey("wpmw.wpr", verbose_name='关联产品', on_delete=models.CASCADE
, related_name='wpr_handoverbw', null=True, blank=True)
@ -673,15 +677,14 @@ class BatchSt(BaseModel):
cls_qs = cls.objects.filter(batch=batch)
if cls_qs.exists():
if reuse_node:
node:BatchSt = (cls_qs.filter(mio__isnull=False)|cls_qs.filter(
material_start__isnull=True)).order_by('-version').first()
node:BatchSt = (cls_qs.filter(mio__isnull=False)|cls_qs.filter(handover=None, mio=None, mlog=None)).order_by('-version').first()
if node is None:
raise ParseError(f"{batch}-该批次号因物料不同不可引用")
elif node.material_start is None:
node.material_start = material_start
node.save(update_fields = ["material_start"])
elif node.material_start is not None and node.material_start != material_start:
raise ParseError(f"{batch}-该批次号因物料不同不可引用-{str(node.material_start)} vs {str(material_start)}")
# elif node.material_start is not None and node.material_start != material_start:
# raise ParseError(f"{batch}-该批次号因物料不同不可引用-{str(node.material_start)} vs {str(material_start)}")
return node, False
else:
latest_version = BatchSt.objects.filter(batch=batch).aggregate(Max("version"))["version__max"]

View File

@ -25,10 +25,17 @@ def main(batch: str):
if mlogb1_qs.exists():
data[f"{mgroup_name}_日期"] = []
data[f"{mgroup_name}_操作人"] = []
data[f"{mgroup_name}_count_use"] = 0
data[f"{mgroup_name}_count_real"] = 0
data[f"{mgroup_name}_count_ok"] = 0
data[f"{mgroup_name}_count_ok_full"] = 0
mlogb_q_ids = []
for item in mlogb1_qs:
# 找到对应的输入
mlogb_from:Mlogb = item.mlogb_from
if mlogb_from:
mlogb_q_ids.append(mlogb_from.id)
data[f"{mgroup_name}_count_use"] += mlogb_from.count_use
if item.mlog.handle_user:
data[f"{mgroup_name}_操作人"].append(item.mlog.handle_user)
if item.mlog.handle_date:
@ -48,6 +55,11 @@ def main(batch: str):
data[f"{mgroup_name}_合格率"] = 0
mlogbd1_qs = MlogbDefect.objects.filter(mlogb__in=mlogb1_qs, count__gt=0).values("defect__name").annotate(total=Sum("count"))
mlogbd1_q_qs = MlogbDefect.objects.filter(mlogb__id__in=mlogb_q_ids, count__gt=0).values("defect__name").annotate(total=Sum("count"))
for item in mlogbd1_q_qs:
data[f"{mgroup_name}_加工前_缺陷_{item['defect__name']}"] = item["total"]
data[f"{mgroup_name}_加工前_缺陷_{item['defect__name']}_比例"] = round((item["total"] / data[f"{mgroup_name}_count_use"])*100, 2)
for item in mlogbd1_qs:
data[f"{mgroup_name}_缺陷_{item['defect__name']}"] = item["total"]
@ -58,6 +70,7 @@ def main(batch: str):
data[f"{mgroup_name}_操作人"] = list(set(data[f"{mgroup_name}_操作人"]))
data[f"{mgroup_name}_操作人"] = ";".join([item.name for item in data[f"{mgroup_name}_操作人"]])
mlogb2_qs = Mlogb.objects.filter(mlog__submit_time__isnull=False, material_out__isnull=False, mlog__mgroup__name="外观检验", mlog__is_fix=True, batch=batch)
if mlogb2_qs.exists():
data["外观检验_返修_日期"] = []
@ -72,7 +85,7 @@ def main(batch: str):
data["外观检验_返修_日期"].append(item.mlog.handle_date)
data["外观检验_返修_count_real"] += item.count_real
data["外观检验_返修_count_ok"] += item.count_ok
data["外观检验_返修_count_ok_full"] += item.count_ok_full
data["外观检验_返修_count_ok_full"] += item.count_ok_full if item.count_ok_full else 0
data["外观检验_返修_日期"] = list(set(data["外观检验_返修_日期"]))
data["外观检验_返修_日期"] = ";".join([item.strftime("%Y-%m-%d") for item in data["外观检验_返修_日期"]])

View File

@ -192,6 +192,7 @@ class WMaterialSerializer(CustomModelSerializer):
notok_sign_name = serializers.SerializerMethodField()
defect_name = serializers.CharField(source="defect.name", read_only=True)
count_working = serializers.IntegerField(read_only=True, label='在制数量')
count_handovering = serializers.IntegerField(read_only=True, label='正在交送的数量')
def get_notok_sign_name(self, obj):
return getattr(NotOkOption, obj.notok_sign, NotOkOption.qt).label if obj.notok_sign else None
@ -202,7 +203,10 @@ class WMaterialSerializer(CustomModelSerializer):
def to_representation(self, instance):
ret = super().to_representation(instance)
ret['count_cando'] = str(Decimal(ret['count']) - Decimal(ret['count_working']))
if 'count' in ret and 'count_working' in ret:
ret['count_cando'] = str(Decimal(ret['count']) - Decimal(ret['count_working']))
if 'count' in ret and 'count_handovering' in ret:
ret['count_canhandover'] = str(Decimal(ret['count']) - Decimal(ret['count_handovering']))
return ret
class MlogbDefectSerializer(CustomModelSerializer):
@ -388,7 +392,7 @@ class MlogSerializer(CustomModelSerializer):
]
if mlogbin_defect_objects:
MlogbDefect.objects.bulk_create(mlogbin_defect_objects)
mlogbin.cal_count_pn_jgqbl()
mlogbin.cal_count_pn_jgqbl(cal_mlog=False)
# mlogb只用于组合件输出物填写
brotherId_should_list = material_out.brothers
@ -415,7 +419,7 @@ class MlogSerializer(CustomModelSerializer):
pass
else:
batch_out = generate_new_batch(batch_in, instance)
add_dict_2 = {
'mlog': instance, 'batch': batch_out,
'mtask': instance.mtask, 'material_out': instance.material_out,
@ -444,7 +448,8 @@ class MlogSerializer(CustomModelSerializer):
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
mlogb.cal_count_notok()
mlogb.cal_count_notok(cal_mlog=False)
instance.cal_mlog_count_from_mlogb()
return instance
def update(self, instance, validated_data):
@ -494,7 +499,7 @@ class MlogSerializer(CustomModelSerializer):
]
if mlogbin_defect_objects:
MlogbDefect.objects.bulk_create(mlogbin_defect_objects)
minx.cal_count_pn_jgqbl()
minx.cal_count_pn_jgqbl(cal_mlog=False)
# 修改产出
if instance.fill_way == Mlog.MLOG_2 and instance.material_out.brothers:
@ -516,6 +521,8 @@ class MlogSerializer(CustomModelSerializer):
batch_out = generate_new_batch(batch_in, instance)
mox, _ = Mlogb.objects.get_or_create(mlog=instance, batch=batch_out,
mtask=instance.mtask, material_out=instance.material_out)
# 需要同步更新数量
mox.count_real = instance.count_real
mox.count_ok = instance.count_ok
mox.count_notok = instance.count_notok
mox.count_break_t = instance.count_break_t
@ -539,7 +546,8 @@ class MlogSerializer(CustomModelSerializer):
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
mox.cal_count_notok()
mox.cal_count_notok(cal_mlog=False)
instance.cal_mlog_count_from_mlogb()
return instance
def validate(self, attrs):
@ -578,20 +586,21 @@ class MlogSerializer(CustomModelSerializer):
supplier = attrs.get('supplier', None)
if not supplier:
raise ParseError('外协必须选择外协单位')
mtask = attrs.get('mtask', None)
count_notok = 0
for i in attrs:
if 'count_n_' in i:
if attrs[i] < 0:
raise ParseError(f'{attrs[i]}不能小于0')
count_notok = count_notok + attrs[i]
attrs['count_notok'] = count_notok
mlogdefect = attrs.get('mlogdefect', None)
if mlogdefect is None:
count_notok = 0
for i in attrs:
if 'count_n_' in i:
if attrs[i] < 0:
raise ParseError(f'{attrs[i]}不能小于0')
count_notok = count_notok + attrs[i]
attrs['count_notok'] = count_notok
if attrs['count_ok'] < 0:
raise ParseError('合格数量不能小于0')
if attrs['count_real'] >= attrs['count_ok'] + attrs['count_notok']:
pass
else:
raise ParseError('生产数量不能小于合格数量')
if attrs['count_real'] != attrs['count_ok'] + attrs['count_notok']:
raise ParseError('生产数量需等于合格数量+不合格数量')
if mtask:
if mtask.start_date == mtask.end_date:
attrs['handle_date'] = mtask.start_date
@ -755,7 +764,7 @@ class MlogbInSerializer(CustomModelSerializer):
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
ins.cal_count_pn_jgqbl()
ins.cal_count_pn_jgqbl(cal_mlog=False)
return ins
class MlogbInUpdateSerializer(CustomModelSerializer):
@ -773,16 +782,30 @@ class MlogbInUpdateSerializer(CustomModelSerializer):
mlogbdefect = validated_data.pop("mlogbdefect", None)
if mlog.submit_time is not None:
raise ParseError('生产日志已提交不可编辑')
ins = super().update(instance, validated_data)
ins:Mlogb = super().update(instance, validated_data)
if mlogbdefect is not None and ins.material_in.tracking == Material.MA_TRACKING_BATCH:
MlogbDefect.objects.filter(mlogb=ins).delete()
mlogb_defect_objects = [
MlogbDefect(**{**item, "mlogb": ins, "id": idWorker.get_id()})
for item in mlogbdefect if item["count"] > 0
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
ins.cal_count_notok()
MlogbDefect.objects.filter(mlogb=ins).delete()
mlogb_defect_objects = [
MlogbDefect(**{**item, "mlogb": ins, "id": idWorker.get_id()})
for item in mlogbdefect if item["count"] > 0
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
ins.cal_count_pn_jgqbl(cal_mlog=False)
# 只有普通工序的才可联动
material_out:Material = ins.mlog.material_out
route:Route = mlog.route
if material_out.tracking == Material.MA_TRACKING_BATCH:
if route and route.process and route.process.mtype == Process.PRO_NORMAL:
mlogbout_qs = Mlogb.objects.filter(mlog=ins.mlog, mlogb_from=ins)
if mlogbout_qs.count() == 1:
mlogbout = mlogbout_qs.first()
mlogbout.count_real = ins.count_use - ins.count_pn_jgqbl - ins.count_break
mlogbout.count_ok = mlogbout.count_real - mlogbout.count_notok
if mlogbout.count_real < 0 or mlogbout.count_ok < 0:
raise ParseError("对应的产出数异常")
mlogbout.save(update_fields=["count_real", "count_ok"])
mlogbout.cal_count_notok(cal_mlog=False)
return ins
class MlogbwCreateUpdateSerializer(CustomModelSerializer):
@ -898,7 +921,7 @@ class MlogbOutUpdateSerializer(CustomModelSerializer):
]
if mlogb_defect_objects:
MlogbDefect.objects.bulk_create(mlogb_defect_objects)
ins.cal_count_notok()
ins.cal_count_notok(cal_mlog=False)
return ins
def validate(self, attrs):

View File

@ -150,6 +150,8 @@ def mlog_submit(mlog: Mlog, user: User, now: Union[datetime.datetime, None]):
"""
生产日志提交后需要执行的操作
"""
if mlog.count_real == 0:
raise ParseError('产出数量不能为0')
if mlog.submit_time is not None:
return
if now is None:
@ -914,8 +916,39 @@ def handover_submit(handover:Handover, user: User, now: Union[datetime.datetime,
def handover_revert(handover:Handover):
if handover.submit_time is None:
raise ParseError('该交接单未提交!')
handover_type = handover.type
handover_mtype = handover.mtype
if handover_type in [Handover.H_NORMAL, Handover.H_REPAIR] and handover_mtype == Handover.H_NORMAL:
raise ParseError('该交接单不支持撤销!')
handoverb_qs = Handoverb.objects.filter(handover=handover)
material = handover.material
for item in handoverb_qs:
wm = item.wm
wm_to = item.wm_to
if wm is None or wm_to is None:
raise ParseError('该交接单不支持撤销2!')
wm.count = wm.count + item.count
wm.save()
wm_to.count = wm_to.count - item.count
if wm_to.count < 0:
raise ParseError('库存不足无法撤回!')
wm_to.save()
if material.tracking == Material.MA_TRACKING_SINGLE:
handoverbws = Handoverbw.objects.filter(handoverb=item)
if handoverbws.count() != item.count:
raise ParseError("交接与明细数量不一致,操作失败")
for item in handoverbws:
wpr:Wpr = item.wpr
Wpr.change_or_new(wpr=wpr, wm=wm, old_wm=wpr.wm, old_mb=wpr.mb, add_version=False)
handover.submit_time = None
handover.submit_user = None
handover.save()
# 删除追踪链
BatchLog.clear(handover=handover)
pass
def mlog_submit_validate(ins: Mlog):
if ins.submit_time:

View File

@ -27,7 +27,7 @@ from .serializers import (SflogExpSerializer, SfLogSerializer, StLogSerializer,
MlogbOutUpdateSerializer, FmlogSerializer, FmlogUpdateSerializer, BatchStSerializer,
MlogbwCreateUpdateSerializer, HandoverMgroupSerializer, MlogListSerializer,
MlogbSerializer, MlogUserSerializer, BatchLogSerializer)
from .services import mlog_submit, handover_submit, mlog_revert, get_batch_dag
from .services import mlog_submit, handover_submit, mlog_revert, get_batch_dag, handover_revert
from apps.wpm.services import mlog_submit_validate, generate_new_batch
from apps.wf.models import State
from apps.wpmw.models import Wpr
@ -427,6 +427,18 @@ class HandoverViewSet(CustomModelViewSet):
if ins.submit_time is None:
handover_submit(ins, user, None)
return Response()
@action(methods=['post'], detail=True, perms_map={'post': 'handover.submit'}, serializer_class=Serializer)
@transaction.atomic
def revert(self, request, *args, **kwargs):
"""交接记录撤回(变动车间库存)
交接记录撤回
"""
ins: Handover = self.get_object()
if ins.submit_time:
handover_revert(ins)
return Response()
@action(methods=['post'], detail=False, perms_map={'post': '*'}, serializer_class=HandoverMgroupSerializer)
@transaction.atomic
@ -563,7 +575,13 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
if ins.mlog.submit_time is not None:
raise ParseError('生产日志已提交不可编辑')
ins.delete()
ins.mlog.cal_mlog_count_from_mlogb()
@transaction.atomic
def perform_update(self, serializer):
ins = serializer.save()
ins.mlog.cal_mlog_count_from_mlogb()
@transaction.atomic
def perform_create(self, serializer):
mlogbin: Mlogb = serializer.save()
@ -611,7 +629,7 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
d_count_real = mlogbin.count_use - mlogbin.count_pn_jgqbl
d_count_ok = d_count_real
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=
update_dict(m_dict, {"count_real": d_count_real, "count_ok": d_count_ok}))
update_dict(m_dict, {"count_real": d_count_real, "count_ok": d_count_ok, "count_ok_full": d_count_ok}))
mlogbout.count_json_from = mlogbin.count_json_from
mlogbout.save(update_fields=["count_json_from"])
if material_in.tracking == Material.MA_TRACKING_SINGLE and material_out.tracking == Material.MA_TRACKING_SINGLE:
@ -627,34 +645,42 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
if process and process.number_to_batch:
m_dict["batch"] = mlogbwin.number
mlogbout, _ = Mlogb.objects.get_or_create(
mlogbw_from=mlogbwin, defaults=update_dict(m_dict, {"count_real": div_number, "count_ok": div_number}))
mlogbw_from=mlogbwin, defaults=update_dict(m_dict, {"count_real": div_number, "count_ok": div_number, "count_ok_full": div_number}))
if lenx == 1:
mlogbout.mlogb_from = mlogbin
mlogbout.number_from = mlogbwin.number
mlogbout.save()
elif material_in.tracking == Material.MA_TRACKING_SINGLE and material_out.tracking == Material.MA_TRACKING_SINGLE:
d_count_real = (mlogbin.count_use-mlogbin.count_pn_jgqbl) * div_number
d_count_ok = d_count_real
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict,
{"count_real": d_count_real, "count_ok": d_count_ok}))
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=m_dict)
d_count_real = 0
for mlogbwin in Mlogbw.objects.filter(mlogb=mlogbin).order_by("number"):
wpr_ = mlogbwin.wpr
for key, val in wpr_.oinfo.items():
if val['name'] == "切片数":
div_number = int(val["val"])
d_count_real = d_count_real + div_number
if div_number == 1:
Mlogbw.objects.get_or_create(wpr=wpr_, mlogb=mlogbout, defaults={"number": wpr_.number, "mlogbw_from": mlogbwin})
else:
for i in range(div_number):
Mlogbw.objects.get_or_create(mlogb=mlogbout, number=f'{wpr_.number}-{i+1}', defaults={"mlogbw_from": mlogbwin})
d_count_ok = d_count_real
mlogbout.count_real = d_count_real
mlogbout.count_ok = d_count_ok
mlogbout.count_ok_full = d_count_ok
mlogbout.save(update_fields=["count_real", "count_ok", "count_ok_full"])
elif material_in.tracking == Material.MA_TRACKING_BATCH and material_out.tracking == Material.MA_TRACKING_BATCH:
d_count_real = (mlogbin.count_use-mlogbin.count_pn_jgqbl) * div_number
d_count_ok = d_count_real
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict,{"count_real": d_count_real, "count_ok": d_count_ok}))
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict,{"count_real": d_count_real, "count_ok": d_count_ok, "count_ok_full": d_count_ok}))
mlogbout.count_json_from = mlogbin.count_json_from
mlogbout.save(update_fields=["count_json_from"])
elif mtype == Process.PRO_MERGE: # 支持批到批,批到个
xcount = math.floor( (mlogbin.count_use-mlogbin.count_pn_jgqbl) / route.div_number)
div_number = route.div_number
xcount = math.floor( (mlogbin.count_use-mlogbin.count_pn_jgqbl) / div_number)
d_count_real = xcount
d_count_ok = xcount
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict, {"count_real": d_count_real, "count_ok": d_count_ok}))
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict, {"count_real": d_count_real, "count_ok": d_count_ok, "count_ok_full": d_count_ok}))
mlogbout.count_json_from = mlogbin.count_json_from
mlogbout.save(update_fields=["count_json_from"])
wpr_number_rule = process.wpr_number_rule
@ -678,13 +704,14 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
elif is_fix:# 支持批到批,个到个
d_count_real = mlogbin.count_use-mlogbin.count_pn_jgqbl
d_count_ok = d_count_real
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict,{"count_real": d_count_real, "count_ok": d_count_ok}))
mlogbout, _ = Mlogb.objects.get_or_create(mlogb_from=mlogbin, defaults=update_dict(m_dict,{"count_real": d_count_real, "count_ok": d_count_ok, "count_ok_full": d_count_ok}))
if material_in.tracking == Material.MA_TRACKING_SINGLE and material_out.tracking == Material.MA_TRACKING_SINGLE:
for mlogbwin in Mlogbw.objects.filter(mlogb=mlogbin).order_by("number"):
wpr_ = mlogbwin.wpr
Mlogbw.objects.get_or_create(wpr=wpr_, mlogb=mlogbout, defaults={"number": wpr_.number, "mlogbw_from": mlogbwin})
else:
raise ParseError("不支持生成产出物料!")
mlog.cal_mlog_count_from_mlogb()
@classmethod
def gen_number_with_rule(cls, rule, material_out:Material, gen_count=1):
@ -698,6 +725,7 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
cq_w = 4
if '02d' in rule:
cq_w = 2
n_count = 0
if wpr:
try:
n_count = int(wpr.number[-cq_w:].lstrip('0'))
@ -705,9 +733,9 @@ class MlogbInViewSet(CreateModelMixin, UpdateModelMixin, DestroyModelMixin, Cust
raise ParseError(f"获取该类产品最后编号错误: {str(e)}")
try:
if gen_count == 1:
return rule.format(c_year=c_year, c_month=c_month, m_model=m_model, n_count=n_count, c_year2=c_year2)
return rule.format(c_year=c_year, c_month=c_month, m_model=m_model, n_count=n_count+1, c_year2=c_year2)
else:
return [rule.format(c_year=c_year, c_month=c_month, m_model=m_model, n_count=n_count+i, c_year2=c_year2) for i in range(gen_count)]
return [rule.format(c_year=c_year, c_month=c_month, m_model=m_model, n_count=n_count+i+1, c_year2=c_year2) for i in range(gen_count)]
except Exception as e:
raise ParseError(f"个号生成错误: {e}")
@ -716,6 +744,11 @@ class MlogbOutViewSet(UpdateModelMixin, CustomGenericViewSet):
queryset = Mlogb.objects.filter(material_out__isnull=False)
serializer_class = MlogbOutUpdateSerializer
@transaction.atomic
def perform_update(self, serializer):
ins:Mlogb = serializer.save()
ins.mlog.cal_mlog_count_from_mlogb()
class FmlogViewSet(CustomModelViewSet):
perms_map = {'get': '*', 'post': 'mlog.create', 'put': 'mlog.update', 'delete': 'mlog.delete'}

View File

@ -29,7 +29,7 @@ class Wpr(BaseModel):
wpr_from = models.ForeignKey("self", verbose_name="来源于", on_delete=models.CASCADE, null=True, blank=True)
@classmethod
def change_or_new(cls, wpr=None, number=None, mb=None, wm=None, old_mb=None, old_wm=None, ftest=None, wpr_from=None):
def change_or_new(cls, wpr=None, number=None, mb=None, wm=None, old_mb=None, old_wm=None, ftest=None, wpr_from=None, add_version=True):
is_create = False
if wpr is None and number is None:
raise ParseError("id和number不能同时为空")
@ -105,7 +105,8 @@ class Wpr(BaseModel):
WprDefect.objects.filter(wpr=ins).delete()
if wm.defect:
WprDefect.objects.create(wpr=ins, defect=wm.defect, is_main=True)
ins.version = ins.version + 1
if add_version:
ins.version = ins.version + 1
ins.save()
if ftest:
# 通过检验变更其缺陷项

View File

@ -1,3 +1,27 @@
## 2.6.2025061715
- feat: 新增功能
- mlogbin的编辑与mlogbout的联动 [caoqianming]
- mlogb首次创建需要更新count_ok_full [caoqianming]
- 支持从wpr处获取切片数 [caoqianming]
- workchain通过线程执行 [caoqianming]
- mlogbin 和 update时cal_count_notok cal_mlog传False [caoqianming]
- ichat 添加workchain接口 [caoqianming]
- 优化get_tyy_data [caoqianming]
- batch_gxerp添加加工前缺陷 [caoqianming]
- 增加check_sql_safe 稳定性 [caoqianming]
- 放开出入库的物料匹配 [caoqianming]
- 采购入库节点可复用 [caoqianming]
- fix: 问题修复
- mlogbinupdate时的bug [caoqianming]
- mlogserializer需要同步count_real [caoqianming]
- cal_mlog_count_from_mlogb 需要传参触发 [caoqianming]
- mlog 数值计算bug [caoqianming]
- wmaterial get在传入query参数时完善can_do的逻辑 [caoqianming]
## 2.6.2025060913
- feat: 新增功能
- inm 和 wpm添加wpr number的查询条件 [caoqianming]
- 优化设备采集cd [caoqianming]
- mlogb cal_count_pn_jgqbl [caoqianming]
## 2.6.2025060617
- feat: 新增功能
- mlogbw也进行mlogb.cal_count_notok() [caoqianming]

View File

@ -35,7 +35,7 @@ sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
ALLOWED_HOSTS = ['*']
SYS_NAME = '星途工厂综合管理系统'
SYS_VERSION = '2.6.2025060617'
SYS_VERSION = '2.6.2025061715'
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Application definition