Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
M
mancheng
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Administrator
mancheng
Commits
b4a72f15
Commit
b4a72f15
authored
Apr 11, 2023
by
dong
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
fix20230411
parent
c61daf0b
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
20 additions
and
152 deletions
+20
-152
apps/view_index/view.py
+0
-147
apps/view_map/view.py
+20
-5
No files found.
apps/view_index/view.py
View file @
b4a72f15
...
@@ -56,153 +56,6 @@ def industry_distribute():
...
@@ -56,153 +56,6 @@ def industry_distribute():
return
jsonify
(
code
=
RET
.
DBERR
,
msg
=
"数据库错误"
)
return
jsonify
(
code
=
RET
.
DBERR
,
msg
=
"数据库错误"
)
# Data bulletin ("数据通报") scraper endpoint.
@api_attract.route("/DataNotification", methods=["GET"])
def data_notification():
    """Scrape the data-bulletin listing pages of zsj.jcgov.gov.cn, fetch each
    article's content table, persist previously unseen articles to
    InvestmentInformation (flag=2), and return the scraped data.

    Returns:
        JSON with ``code``/``msg``; on success also ``data`` (the list of
        scraped article dicts).
    """
    # Listing pages: "index.shtml" is page 1, "index_N.shtml" is page N+1.
    page_html_list = [
        "index.shtml",    # page 1
        "index_1.shtml",  # page 2
        "index_2.shtml",  # page 3
        "index_3.shtml",  # page 4
        "index_4.shtml",  # page 5
        "index_5.shtml",  # page 6
        "index_6.shtml",  # page 7
    ]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54'
    }
    data_list = []
    for page_index in page_html_list:
        url = "http://zsj.jcgov.gov.cn/sjtb/sjtb/{}".format(page_index)
        try:
            response = requests.get(url, headers=headers)
            response.encoding = 'utf-8'
            if response.status_code != 200:
                continue
            html_etree = etree.HTML(response.text)
            content_name_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/text()')
            content_url_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/@href')
            time_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[2]/text()')
            # enumerate keeps the three parallel lists aligned even when a
            # detail fetch fails (the old manual counter only advanced on
            # success, desynchronizing names from URLs/dates afterwards).
            for i, content_name in enumerate(content_name_list):
                # Relative "./" links become absolute detail-page URLs.
                # (Fixed: a stray trailing comma used to turn this into a
                # 1-tuple that then needed content_url[0] to unwrap.)
                content_url = content_url_list[i].replace(
                    "./", 'http://zsj.jcgov.gov.cn/sjtb/sjtb/')
                detail = requests.get(content_url, headers=headers)
                detail.encoding = 'utf-8'
                if detail.status_code != 200:
                    continue
                html_etree1 = etree.HTML(detail.text)
                # The article body is the third <table> on the detail page.
                content = html_etree1.xpath('//table[1]')[2]
                result = etree.tostring(content, encoding='utf-8').decode()
                data_list.append({
                    "content_name": content_name,
                    "content": result,
                    # time_list entries appear to carry one wrapping
                    # character on each side (e.g. brackets) — strip them.
                    "time": time_list[i][1:-1],
                })
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(code=RET.DBERR, msg="数据库错误")
    # Persist new articles; skip those already stored under flag=2.
    for data in data_list:
        content_name = data['content_name']
        inform_obj = InvestmentInformation.query.filter_by(
            flag=2, name=content_name).first()
        if inform_obj:
            continue
        inform_obj = InvestmentInformation()
        inform_obj.name = data['content_name']
        inform_obj.flag = 2
        inform_obj.content = data['content']
        inform_obj.time = data['time']  # (was assigned twice before)
        inform_obj.source = '晋城市投资促进中心'
        inform_obj.info = ''
        db.session.add(inform_obj)
    db.session.commit()
    return jsonify(code=RET.OK, msg="获取成功", data=data_list)
# Work-trend ("工作动态") scraper endpoint.
@api_attract.route("/WorkTrend", methods=["GET"])
def work_trend():
    """Scrape the work-trend news listing pages of zsj.jcgov.gov.cn, fetch
    each article's paragraphs, persist previously unseen articles to
    InvestmentInformation (flag=1), and return the scraped data.

    Returns:
        JSON with ``code``/``msg``; on success also ``data`` (the list of
        scraped article dicts).
    """
    # 25 listing pages: "index.shtml" plus "index_1.shtml" .. "index_24.shtml".
    page_html_list = ["index.shtml"]
    for n in range(1, 25):
        page_html_list.append('index_{}.shtml'.format(n))
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54'
    }
    data_list = []
    for page_index in page_html_list:
        url = "http://zsj.jcgov.gov.cn/xwdt/zhxx/{}".format(page_index)
        try:
            response = requests.get(url, headers=headers)
            response.encoding = 'utf-8'
            if response.status_code != 200:
                continue
            html_etree = etree.HTML(response.text)
            content_name_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/text()')
            content_url_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[@class="list_newstitle"]/a/@href')
            time_list = html_etree.xpath(
                '//div[@class="newslist newslistdixx"]//span[2]/text()')
            # enumerate keeps the three parallel lists aligned even when a
            # detail fetch fails (the old manual counter only advanced on
            # success, desynchronizing names from URLs/dates afterwards).
            for i, content_name in enumerate(content_name_list):
                # Relative "./" links become absolute detail-page URLs.
                content_url = content_url_list[i].replace(
                    './', 'http://zsj.jcgov.gov.cn/xwdt/zhxx/')
                content_html = requests.get(content_url, headers=headers)
                content_html.encoding = 'utf-8'
                if content_html.status_code != 200:
                    continue
                html_etree1 = etree.HTML(content_html.text)
                content_info_list = html_etree1.xpath(
                    '//div[@class="view TRS_UEDITOR trs_paper_default trs_web"]/p/text()')
                # Prefix every paragraph with <br> (result intentionally
                # starts with <br>, matching the previous concatenation loop).
                content_info1 = ''.join(
                    '<br>' + part for part in content_info_list)
                data_list.append({
                    "content_name": content_name,
                    "content_info": content_info1,
                    # time_list entries appear to carry one wrapping
                    # character on each side (e.g. brackets) — strip them.
                    "time": time_list[i][1:-1],
                })
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(code=RET.DBERR, msg="数据库错误")
    # Persist new articles; skip those already stored under flag=1.
    for data in data_list:
        content_name = data['content_name']
        inform_obj = InvestmentInformation.query.filter_by(
            flag=1, name=content_name).first()
        if inform_obj:
            continue
        inform_obj = InvestmentInformation()
        inform_obj.name = data['content_name']
        inform_obj.flag = 1
        inform_obj.time = data['time']
        inform_obj.source = '晋城市投资促进中心'
        inform_obj.content = data['content_info']
        db.session.add(inform_obj)
    db.session.commit()
    return jsonify(code=RET.OK, msg="获取成功", data=data_list)
def
get_jjzb
(
district
,
flag
):
def
get_jjzb
(
district
,
flag
):
'''经济指标数据'''
'''经济指标数据'''
try
:
try
:
...
...
apps/view_map/view.py
View file @
b4a72f15
...
@@ -56,18 +56,24 @@ def attract_map():
...
@@ -56,18 +56,24 @@ def attract_map():
Enterprise
.
c_type1
==
inid
,
Enterprise
.
c_type1
==
inid
,
Enterprise
.
c_type2
==
inid
Enterprise
.
c_type2
==
inid
))
))
company1
=
[]
if
product
:
if
product
:
enterprise
=
enterprise
.
filter
(
or_
(
# enterprise = enterprise.filter(or_(
Enterprise
.
product_all
.
like
(
"
%
{}
%
"
.
format
(
product
)),
# Enterprise.product_all.like("%{}%".format(product)),
Enterprise
.
product_all1
.
like
(
"
%
{}
%
"
.
format
(
product
)),
# Enterprise.product_all1.like("%{}%".format(product)),
Enterprise
.
product_all2
.
like
(
"
%
{}
%
"
.
format
(
product
))
# Enterprise.product_all2.like("%{}%".format(product))
))
# ))
company1
,
size
=
get_enterprise_num
(
product
,
inid
)
# print(enterprise)
# print(enterprise)
df
=
list
()
df
=
list
()
if
not
province
:
# 全国,省数据
if
not
province
:
# 全国,省数据
provinces
=
Enterprise
.
query
.
with_entities
(
Enterprise
.
province
)
.
distinct
()
.
all
()
provinces
=
Enterprise
.
query
.
with_entities
(
Enterprise
.
province
)
.
distinct
()
.
all
()
provinces
=
[
i
[
0
]
for
i
in
provinces
if
i
[
0
]]
# 拿到省份的无重复值
provinces
=
[
i
[
0
]
for
i
in
provinces
if
i
[
0
]]
# 拿到省份的无重复值
for
pro
in
provinces
:
for
pro
in
provinces
:
if
product
:
num
=
len
([
company
for
company
in
company1
if
company
.
province
==
pro
])
else
:
num
=
enterprise
.
filter_by
(
province
=
pro
)
.
count
()
num
=
enterprise
.
filter_by
(
province
=
pro
)
.
count
()
# province_data = Enterprise.query.filter_by(province=pro).first()
# province_data = Enterprise.query.filter_by(province=pro).first()
df
.
append
({
"name"
:
pro
,
df
.
append
({
"name"
:
pro
,
...
@@ -82,6 +88,9 @@ def attract_map():
...
@@ -82,6 +88,9 @@ def attract_map():
cities
=
Enterprise
.
query
.
filter_by
(
province
=
province
)
.
with_entities
(
Enterprise
.
city
)
.
distinct
()
.
all
()
cities
=
Enterprise
.
query
.
filter_by
(
province
=
province
)
.
with_entities
(
Enterprise
.
city
)
.
distinct
()
.
all
()
cities
=
[
i
[
0
]
for
i
in
cities
if
i
[
0
]]
# 拿到城市的无重复值
cities
=
[
i
[
0
]
for
i
in
cities
if
i
[
0
]]
# 拿到城市的无重复值
for
cit
in
cities
:
for
cit
in
cities
:
if
product
:
num
=
len
([
company
for
company
in
company1
if
company
.
province
==
province
and
company
.
city
==
cit
])
else
:
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
cit
)
.
count
()
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
cit
)
.
count
()
city_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
cit
)
.
first
()
city_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
cit
)
.
first
()
df
.
append
({
"name"
:
cit
,
df
.
append
({
"name"
:
cit
,
...
@@ -97,6 +106,9 @@ def attract_map():
...
@@ -97,6 +106,9 @@ def attract_map():
Enterprise
.
district
)
.
distinct
()
.
all
()
Enterprise
.
district
)
.
distinct
()
.
all
()
districts
=
[
i
[
0
]
for
i
in
districts
if
i
[
0
]]
# 拿到区县的无重复值
districts
=
[
i
[
0
]
for
i
in
districts
if
i
[
0
]]
# 拿到区县的无重复值
for
dis
in
districts
:
for
dis
in
districts
:
if
product
:
num
=
len
([
company
for
company
in
company1
if
company
.
province
==
province
and
company
.
city
==
city
and
company
.
district
==
dis
])
else
:
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
dis
)
.
count
()
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
dis
)
.
count
()
district_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
dis
)
.
first
()
district_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
dis
)
.
first
()
df
.
append
({
"name"
:
dis
,
df
.
append
({
"name"
:
dis
,
...
@@ -108,6 +120,9 @@ def attract_map():
...
@@ -108,6 +120,9 @@ def attract_map():
redis_store
.
setex
(
name_query
,
30
*
24
*
3600
,
json
.
dumps
(
df
))
redis_store
.
setex
(
name_query
,
30
*
24
*
3600
,
json
.
dumps
(
df
))
return
jsonify
(
code
=
RET
.
OK
,
msg
=
"获取成功"
,
data
=
df
)
return
jsonify
(
code
=
RET
.
OK
,
msg
=
"获取成功"
,
data
=
df
)
if
province
and
city
and
district
:
# 区数据
if
province
and
city
and
district
:
# 区数据
if
product
:
num
=
len
([
company
for
company
in
company1
if
company
.
province
==
province
and
company
.
city
==
city
and
company
.
district
==
dis
])
else
:
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
district
)
.
count
()
num
=
enterprise
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
district
)
.
count
()
district_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
district
)
.
first
()
district_data
=
Enterprise
.
query
.
filter_by
(
province
=
province
,
city
=
city
,
district
=
district
)
.
first
()
# print(district_data)
# print(district_data)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment