Commit b4a72f15 by dong

fix20230411

parent c61daf0b
@@ -56,153 +56,6 @@ def industry_distribute():
        return jsonify(code=RET.DBERR, msg="数据库错误")

# Data bulletins
@api_attract.route("/DataNotification", methods=["GET"])
def data_notification():
    page_html_list = [
        "index.shtml",    # home page
        "index_1.shtml",  # page 2
        "index_2.shtml",  # page 3
        "index_3.shtml",  # page 4
        "index_4.shtml",  # page 5
        "index_5.shtml",  # page 6
        "index_6.shtml",  # page 7
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54'}
    data_list = []
    for page_index in page_html_list:
        url = "http://zsj.jcgov.gov.cn/sjtb/sjtb/{}".format(page_index)
        try:
            response = requests.get(url, headers=headers)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                html_etree = etree.HTML(response.text)
                content_name_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/text()')
                content_url_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/@href')
                time_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[2]/text()')
                for i, content_name in enumerate(content_name_list):
                    # Detail links are relative ("./..."); rebase them onto the listing directory
                    content_url = content_url_list[i].replace("./", 'http://zsj.jcgov.gov.cn/sjtb/sjtb/')
                    response = requests.get(content_url, headers=headers)
                    response.encoding = 'utf-8'
                    if response.status_code == 200:
                        html_etree1 = etree.HTML(response.text)
                        # The bulletin body is the third matched <table> on the detail page
                        content = html_etree1.xpath('//table[1]')[2]
                        result = etree.tostring(content, encoding='utf-8').decode()
                        data_dic = {
                            "content_name": content_name,
                            "content": result,
                            "time": time_list[i][1:-1]  # strip the enclosing brackets from the date
                        }
                        data_list.append(data_dic)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(code=RET.DBERR, msg="数据库错误")

    # Save to the database, skipping records that already exist
    for data in data_list:
        content_name = data['content_name']
        inform_obj = InvestmentInformation.query.filter_by(flag=2, name=content_name).first()
        if inform_obj:
            continue
        inform_obj = InvestmentInformation()
        inform_obj.name = data['content_name']
        inform_obj.flag = 2
        inform_obj.content = data['content']
        inform_obj.time = data['time']
        inform_obj.source = '晋城市投资促进中心'
        inform_obj.info = ''
        db.session.add(inform_obj)
    db.session.commit()
    return jsonify(code=RET.OK, msg="获取成功", data=data_list)

# Work updates
@api_attract.route("/WorkTrend", methods=["GET"])
def work_trend():
    page_html_list = ["index.shtml"]
    for i in range(1, 25):
        page_html_list.append('index_{}.shtml'.format(i))
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54'}
    data_list = []
    for page_index in page_html_list:
        url = "http://zsj.jcgov.gov.cn/xwdt/zhxx/{}".format(page_index)
        try:
            response = requests.get(url, headers=headers)
            response.encoding = 'utf-8'
            if response.status_code == 200:
                html_etree = etree.HTML(response.text)
                content_name_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/text()')
                content_url_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/@href')
                time_list = html_etree.xpath('//div[@class="newslist newslistdixx"]//span[2]/text()')
                for i, content_name in enumerate(content_name_list):
                    content_url = content_url_list[i].replace('./', 'http://zsj.jcgov.gov.cn/xwdt/zhxx/')
                    content_html = requests.get(content_url, headers=headers)
                    content_html.encoding = 'utf-8'
                    if content_html.status_code == 200:
                        html_etree1 = etree.HTML(content_html.text)
                        content_info_list = html_etree1.xpath(
                            '//div[@class="view TRS_UEDITOR trs_paper_default trs_web"]/p/text()')
                        # Join the article paragraphs with <br> separators
                        content_info1 = ''
                        for content_info in content_info_list:
                            content_info1 = content_info1 + '<br>' + content_info
                        data_dic = {
                            "content_name": content_name,
                            "content_info": content_info1,
                            "time": time_list[i][1:-1]  # strip the enclosing brackets from the date
                        }
                        data_list.append(data_dic)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(code=RET.DBERR, msg="数据库错误")

    # Save to the database, skipping records that already exist
    for data in data_list:
        content_name = data['content_name']
        inform_obj = InvestmentInformation.query.filter_by(flag=1, name=content_name).first()
        if inform_obj:
            continue
        inform_obj = InvestmentInformation()
        inform_obj.name = data['content_name']
        inform_obj.flag = 1
        inform_obj.time = data['time']
        inform_obj.source = '晋城市投资促进中心'
        inform_obj.content = data['content_info']
        db.session.add(inform_obj)
    db.session.commit()
    return jsonify(code=RET.OK, msg="获取成功", data=data_list)

def get_jjzb(district, flag):
    '''Economic indicator data.'''
    try:
......
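The two routes above repeat the same listing-page scrape verbatim. Below is a minimal sketch of how that duplication could be factored out, assuming only the requests and lxml libraries this module already uses; the helper name fetch_list_page is hypothetical, not part of this codebase.

import requests
from lxml import etree

LIST_ANCHOR_XPATH = '//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a'

def fetch_list_page(base_url, page_name, headers):
    """Hypothetical helper: fetch one listing page and return
    (titles, absolute detail URLs, date strings), or None on a non-200 response."""
    response = requests.get(base_url + page_name, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    if response.status_code != 200:
        return None
    html_etree = etree.HTML(response.text)
    names = html_etree.xpath(LIST_ANCHOR_XPATH + '/text()')
    # Detail links are relative ("./..."); rebase them onto the listing directory
    urls = [u.replace('./', base_url) for u in html_etree.xpath(LIST_ANCHOR_XPATH + '/@href')]
    times = [t[1:-1] for t in html_etree.xpath('//div[@class="newslist newslistdixx"]//span[2]/text()')]
    return names, urls, times

With such a helper, data_notification and work_trend would differ only in the base URL and in how each extracts the detail-page body.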
@@ -56,18 +56,24 @@ def attract_map():
            Enterprise.c_type1 == inid,
            Enterprise.c_type2 == inid
        ))
    company1 = []
    if product:
        # Superseded by get_enterprise_num; the old LIKE filter is kept for reference:
        # enterprise = enterprise.filter(or_(
        #     Enterprise.product_all.like("%{}%".format(product)),
        #     Enterprise.product_all1.like("%{}%".format(product)),
        #     Enterprise.product_all2.like("%{}%".format(product))
        # ))
        company1, size = get_enterprise_num(product, inid)
    df = list()
    if not province:  # nationwide: per-province data
        provinces = Enterprise.query.with_entities(Enterprise.province).distinct().all()
        provinces = [i[0] for i in provinces if i[0]]  # distinct, non-empty province names
        for pro in provinces:
            if product:
                num = len([company for company in company1 if company.province == pro])
            else:
                num = enterprise.filter_by(province=pro).count()
            df.append({"name": pro,
@@ -82,6 +88,9 @@ def attract_map():
        cities = Enterprise.query.filter_by(province=province).with_entities(Enterprise.city).distinct().all()
        cities = [i[0] for i in cities if i[0]]  # distinct, non-empty city names
        for cit in cities:
            if product:
                num = len([company for company in company1 if company.province == province and company.city == cit])
            else:
                num = enterprise.filter_by(province=province, city=cit).count()
            city_data = Enterprise.query.filter_by(province=province, city=cit).first()
            df.append({"name": cit,
@@ -97,6 +106,9 @@ def attract_map():
            Enterprise.district).distinct().all()
        districts = [i[0] for i in districts if i[0]]  # distinct, non-empty district names
        for dis in districts:
            if product:
                num = len([company for company in company1
                           if company.province == province and company.city == city and company.district == dis])
            else:
                num = enterprise.filter_by(province=province, city=city, district=dis).count()
            district_data = Enterprise.query.filter_by(province=province, city=city, district=dis).first()
            df.append({"name": dis,
@@ -108,6 +120,9 @@ def attract_map():
        redis_store.setex(name_query, 30 * 24 * 3600, json.dumps(df))
        return jsonify(code=RET.OK, msg="获取成功", data=df)
    if province and city and district:  # single-district data
        if product:
            # Compare against the requested district, not the stale loop variable `dis`
            num = len([company for company in company1
                       if company.province == province and company.city == city and company.district == district])
        else:
            num = enterprise.filter_by(province=province, city=city, district=district).count()
        district_data = Enterprise.query.filter_by(province=province, city=city, district=district).first()
......
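get_enterprise_num is called in attract_map but defined outside this diff. As a hedged sketch of what the call site implies — two return values, the first an iterable of Enterprise rows carrying province/city/district attributes — it might repackage the commented-out LIKE filter as a standalone query. Every detail below is an assumption, not the actual implementation, and it relies on the module's existing Enterprise model and sqlalchemy or_ import.

def get_enterprise_num(product, inid):
    # Hypothetical sketch only: the real helper is not shown in this diff.
    # Assumes the same Enterprise columns and LIKE matching that the
    # commented-out filter in attract_map used.
    query = Enterprise.query
    if inid:
        query = query.filter(or_(
            Enterprise.c_type1 == inid,
            Enterprise.c_type2 == inid
        ))
    if product:
        query = query.filter(or_(
            Enterprise.product_all.like("%{}%".format(product)),
            Enterprise.product_all1.like("%{}%".format(product)),
            Enterprise.product_all2.like("%{}%".format(product))
        ))
    companies = query.all()
    # The call site unpacks two values: the matching rows and their count
    return companies, len(companies)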