@@ -1,93 +1,93 @@
-
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-'''
-@Contact : liuyuqi.gov@msn.cn
-@Time : 2022/06/28 23:15:05
-@License : Copyright © 2017-2022 liuyuqi. All Rights Reserved.
-@Desc :
-'''
-
-from http import cookies
-import requests
-import sys,os,json
-from crawl_baidu.lib.json_conf import JsonConf
-import time
-headers = {
-    'Accept': 'application/json, text/plain, */*',
-    'Accept-Encoding': 'gzip, deflate',
-    'Accept-Language': 'zh-CN,zh;q=0.9',
-    'Cache-Control': 'no-cache',
-    'DNT': '1',
-    'Host': 'index.baidu.com',
-    'Pragma': 'no-cache',
-    'Proxy-Connection': 'keep-alive',
-    'Referer': 'https://index.baidu.com/v2/main/index.html',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36',
-    'X-Requested-With': 'XMLHttpRequest',
-}
-
-class CrawlBaidu():
-    def __init__(self):
-        self.sess=requests.Session()
-        self.jsonConf = JsonConf()
-        self.conf = self.jsonConf.load()
-        cookie = self.conf.get('cookie')
-        CliperText = self.conf.get('CliperText')
-        self.words = self.conf.get('words')
-        # self.sess.cookies.update(cookie)
-        self.sess.headers.update({
-            "Cipher-Text":CliperText,
-            "Cookie" : cookie
-        })
-
-    @staticmethod
-    def decrypt(t,e):
-        n = list(t)
-        i = list(e)
-        a = {}
-        result = []
-        ln = int(len(n)/2)
-        start = n[ln:]
-        end = n[:ln]
-        for j,k in zip(start, end):
-            a.update({k: j})
-        for j in e:
-            result.append(a.get(j))
-        return ''.join(result)
-
-    def get_ptbk(self,uniqid):
-        url = 'http://index.baidu.com/Interface/ptbk?uniqid={}'
-        resp = self.sess.get(url.format(uniqid), headers=headers)
-        if resp.status_code != 200:
-            print('Failed to fetch uniqid')
-            sys.exit(1)
-        return resp.json().get('data')
-
-    def get_index_data(self, start='2011-01-03', end='2022-08-05'):
-        keyword = str(self.words).replace("'", '"')
-        url = f'http://index.baidu.com/api/SearchApi/index?area=0&word={keyword}&area=0&startDate={start}&endDate={end}'
-        resp = self.sess.get(url, headers=headers)
-        if resp.status_code != 200:
-            print('Failed to fetch index data')
-            sys.exit(1)
-
-        content = resp.json()
-        data = content.get('data')
-        user_indexes = data.get('userIndexes')[0]
-        uniqid = data.get('uniqid')
-        ptbk = self.get_ptbk(uniqid)
-
-        while ptbk is None or ptbk == '':
-            ptbk = self.get_ptbk(uniqid)
-
-        all_data = user_indexes.get('all').get('data')
-        result = CrawlBaidu.decrypt(ptbk, all_data)
-        result = result.split(',')
-
-        print(result)
-
-        if not os.path.exists("data"):
-            os.mkdir("data")
-        with open("data/res.txt","w") as file:
+
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+'''
+@Contact : liuyuqi.gov@msn.cn
+@Time : 2022/06/28 23:15:05
+@License : Copyright © 2017-2022 liuyuqi. All Rights Reserved.
+@Desc :
+'''
+
+from http import cookies
+import requests
+import sys,os,json
+from crawl_baidu.utils.json_conf import JsonConf
+import time
+headers = {
+    'Accept': 'application/json, text/plain, */*',
+    'Accept-Encoding': 'gzip, deflate',
+    'Accept-Language': 'zh-CN,zh;q=0.9',
+    'Cache-Control': 'no-cache',
+    'DNT': '1',
+    'Host': 'index.baidu.com',
+    'Pragma': 'no-cache',
+    'Proxy-Connection': 'keep-alive',
+    'Referer': 'https://index.baidu.com/v2/main/index.html',
+    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36',
+    'X-Requested-With': 'XMLHttpRequest',
+}
+
+class CrawlBaidu():
+    def __init__(self):
+        self.sess=requests.Session()
+        self.jsonConf = JsonConf()
+        self.conf = self.jsonConf.load()
+        cookie = self.conf.get('cookie')
+        CliperText = self.conf.get('CliperText')
+        self.words = self.conf.get('words')
+        # self.sess.cookies.update(cookie)
+        self.sess.headers.update({
+            "Cipher-Text":CliperText,
+            "Cookie" : cookie
+        })
+
+    @staticmethod
+    def decrypt(t,e):
+        n = list(t)
+        i = list(e)
+        a = {}
+        result = []
+        ln = int(len(n)/2)
+        start = n[ln:]
+        end = n[:ln]
+        for j,k in zip(start, end):
+            a.update({k: j})
+        for j in e:
+            result.append(a.get(j))
+        return ''.join(result)
+
+    def get_ptbk(self,uniqid):
+        url = 'http://index.baidu.com/Interface/ptbk?uniqid={}'
+        resp = self.sess.get(url.format(uniqid), headers=headers)
+        if resp.status_code != 200:
+            print('Failed to fetch uniqid')
+            sys.exit(1)
+        return resp.json().get('data')
+
+    def get_index_data(self, start='2011-01-03', end='2022-08-05'):
+        keyword = str(self.words).replace("'", '"')
+        url = f'http://index.baidu.com/api/SearchApi/index?area=0&word={keyword}&area=0&startDate={start}&endDate={end}'
+        resp = self.sess.get(url, headers=headers)
+        if resp.status_code != 200:
+            print('Failed to fetch index data')
+            sys.exit(1)
+
+        content = resp.json()
+        data = content.get('data')
+        user_indexes = data.get('userIndexes')[0]
+        uniqid = data.get('uniqid')
+        ptbk = self.get_ptbk(uniqid)
+
+        while ptbk is None or ptbk == '':
+            ptbk = self.get_ptbk(uniqid)
+
+        all_data = user_indexes.get('all').get('data')
+        result = CrawlBaidu.decrypt(ptbk, all_data)
+        result = result.split(',')
+
+        print(result)
+
+        if not os.path.exists("data"):
+            os.mkdir("data")
+        with open("data/res.txt","w") as file:
             file.write(json.dumps(result))
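
`CrawlBaidu.__init__` pulls `cookie`, `CliperText`, and `words` out of a JSON config through `JsonConf`, now imported from `crawl_baidu.utils.json_conf`. The patch does not show `JsonConf` itself, so the sketch below is a minimal stand-in for the contract the class relies on: `load()` must return something dict-like with `.get()`. The config path `conf/config.json` and the exact shape of `words` are assumptions; the list-of-dicts shape is inferred from the `str(self.words).replace("'", '"')` call that turns it into JSON for the `word=` query parameter.

```python
# Minimal stand-in for crawl_baidu.utils.json_conf.JsonConf (not shown in this
# patch). The default path "conf/config.json" is hypothetical.
import json

class JsonConf:
    def __init__(self, conf_path="conf/config.json"):
        self.conf_path = conf_path

    def load(self):
        """Return the parsed config dict; CrawlBaidu calls .get() on the result."""
        with open(self.conf_path, encoding="utf-8") as f:
            return json.load(f)

# Keys CrawlBaidu.__init__ reads from the config (values are illustrative):
# {
#     "cookie": "BAIDUID=...; BDUSS=...",               # logged-in index.baidu.com cookie
#     "CliperText": "...",                               # sent as the Cipher-Text header
#     "words": [[{"name": "python", "wordType": 1}]]     # assumed keyword payload shape
# }
```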
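
The `decrypt` helper implements the ptbk substitution used by Baidu Index: the first half of the key string is the cipher alphabet and the second half is the plain alphabet, so each cipher character `t[i]` (for `i < len(t)//2`) decodes to `t[i + len(t)//2]`. (The local `i = list(e)` is an unused leftover.) A quick sanity check with a made-up key; real keys come back from `/Interface/ptbk`:

```python
# Made-up 8-character ptbk key mapping 'a'->'1', 'b'->'2', 'c'->',', 'd'->'.'.
# Assumes the CrawlBaidu class above is in scope.
key = "abcd12,."
cipher = "abcbb"  # encodes the comma-separated series "12,22"
assert CrawlBaidu.decrypt(key, cipher) == "12,22"
```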
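
End to end, `get_index_data` fetches the encrypted series, resolves the ptbk key for the response's `uniqid` (retrying while the key comes back empty), decrypts it, prints the values, and writes them to `data/res.txt` rather than returning them; the `area=0` query parameter is duplicated in the URL but harmless. A usage sketch, assuming a valid cookie and `Cipher-Text` value are already in the config; the import path is an assumption:

```python
from crawl_baidu.crawl_baidu import CrawlBaidu  # hypothetical module path

cb = CrawlBaidu()
# Prints the decrypted daily index values and writes them to data/res.txt.
cb.get_index_data(start='2022-01-01', end='2022-06-30')
```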