import requests
from fake_useragent import UserAgent
from threading import Thread, Lock, BoundedSemaphore
import json
import re
from typing import Dict, List
from bs4 import BeautifulSoup
import logging
import time
import os
from queue import Queue
import random
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)

lock = Lock()                      # guards writes to the output JSON file
semaphore = BoundedSemaphore(5)    # at most 5 worker threads crawl concurrently

def get_proxy():
    """Fetch one proxy from the proxy API and return it as "http://ip:port"."""
    api_url = 'http*******************'  # proxy API endpoint (masked in the source)
    try:
        time.sleep(0.2)
        response = requests.get(api_url, timeout=10)
        response.raise_for_status()
        data = json.loads(response.content)
        proxy_list = data.get('data', {}).get('proxy_list', [])
        item = proxy_list[0]
        if isinstance(item, str) and ':' in item:
            ip, remain = item.split(':', 1)
            port = remain.split(',')[0]
            return f"http://{ip}:{port}"
    except Exception as e:
        logger.error(f"Failed to obtain proxy: {e}")
    return None

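# Sketch of the response shape get_proxy() assumes (the real API above is masked,
# so the field values here are illustrative only):
#
#     {"data": {"proxy_list": ["1.2.3.4:8080,extra-fields..."]}}
#
# i.e. a "data" object holding a "proxy_list" whose first entry is an "ip:port,..."
# string; everything after the first comma of that entry is discarded.
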
def get_url(stock, page):
    return f"https://guba.eastmoney.com/list,{stock}_{page}.html"

def extract_posts(data: Dict, stock_num: int) -> List[Dict]:
    """Extract and format post information from raw data."""
    posts = []
    for post in data.get('re', []):
        try:
            title = post.get('post_title', '').strip()
            post_info = {
                'id': post.get('post_id'),
                'title': title,
                'author': post.get('user_nickname', '').strip(),
                'click_count': post.get('post_click_count', 0),
                'comment_count': post.get('post_comment_count', 0),
                'publish_time': post.get('post_publish_time', ''),
                'last_time': post.get('post_last_time', ''),
                'source_url': f"https://guba.eastmoney.com/news,{stock_num},{post.get('post_id')}.html"
            }
            posts.append(post_info)
        except (KeyError, AttributeError) as e:
            logger.warning(f"Skipping malformed post: {str(e)}")
    return posts

def parse_page(page_source: str, page: int, stock_num: int) -> List[Dict]:
    """Find the embedded `var article_list = {...}` script and parse its JSON."""
    soup = BeautifulSoup(page_source, 'html.parser')
    script_tags = soup.find_all('script')
    for script in script_tags:
        if script.string and 'var article_list' in script.string:
            try:
                match = re.search(r'var article_list\s*=\s*({.*?});', script.string, re.DOTALL)
                if match:
                    json_str = match.group(1)
                    # Strip trailing commas that would break json.loads
                    json_str = re.sub(r',\s*}(?=\s*$)', '}', json_str)
                    json_str = re.sub(r',\s*](?=\s*$)', ']', json_str)
                    data = json.loads(json_str)
                    return extract_posts(data, stock_num)
            except (json.JSONDecodeError, AttributeError, KeyError) as e:
                logger.error(f"Error parsing page {page}: {str(e)}")
                continue
    return []


def get_page(stock, page, proxy):
    """Fetch one listing page through the given proxy and return its decoded HTML."""
    headers = {
        'User-Agent': UserAgent().random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
    }
    proxies = {"http": proxy, "https": proxy}
    url = get_url(stock, page)
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        logger.info(f"Page {page} of stock {stock} fetched successfully.")
        return response.content.decode()
    except Exception as e:
        logger.error(f"Failed to fetch page {page} of stock {stock}: {e}")
        return None

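# parse_page() relies on the listing page embedding its data in a script tag; a
# minimal sketch of the markup it targets (field values are illustrative, not
# real data from the site):
#
#     <script>
#         var article_list = {"re": [{"post_id": 123456789,
#                                     "post_title": "...",
#                                     "user_nickname": "...",
#                                     "post_click_count": 0,
#                                     "post_comment_count": 0,
#                                     "post_publish_time": "2024-01-01 00:00:00",
#                                     "post_last_time": "2024-01-01 00:00:00"}]};
#     </script>
#
# The regex captures the object literal, trailing commas are stripped, and the
# "re" array is handed to extract_posts().
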
def save_to_json(posts: List[Dict], file_path: str):
    """Append post records to a JSON file (guarded by a lock across threads)."""
    with lock:
        if os.path.exists(file_path):
            with open(file_path, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)
            existing_data.extend(posts)
            posts = existing_data

        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(posts, f, ensure_ascii=False, indent=4)
        logger.info(f"Data saved to {file_path}")

def load_progress(progress_file: str) -> int:
    """Return the largest page number recorded in the progress file (0 if none)."""
    if os.path.exists(progress_file):
        with open(progress_file, 'r', encoding='utf-8') as f:
            pages = [int(line) for line in f.read().split() if line.isdigit()]
        if pages:
            return max(pages)
    return 0

def update_progress(progress_file: str, current_page: int):
    """Append the just-finished page number to the progress file."""
    with open(progress_file, 'a+', encoding='utf-8') as f:
        f.write(str(current_page) + '\n')
    logger.info(f"Progress written to {progress_file}, current page: {current_page}")

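# Note on the progress file (derived from the code above, values illustrative):
# because update_progress() appends, "./log" accumulates one page number per line,
# e.g.
#     1
#     2
#     17
# load_progress() returns the largest value seen, and the __main__ block below
# re-reads the same file to work out which pages are still missing.
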
def process_pages(stock_code: str, start_page: int, end_page: int, output_file: str):
    """Crawl a contiguous range of pages inside one worker thread."""
    semaphore.acquire()
    proxy = get_proxy()
    if not proxy:
        logger.error("Could not obtain a proxy IP; skipping this thread.")
        semaphore.release()
        return
    try:
        for page in range(start_page, end_page + 1):
            page_source = get_page(stock_code, page, proxy)
            if not page_source:
                logger.error(f"Could not fetch page source; skipping page {page}.")
                continue

            posts = parse_page(page_source, page, stock_code)
            if posts:
                logger.info(f"Extracted {len(posts)} posts from page {page}.")
                save_to_json(posts, output_file)
                update_progress("./log", page)
            else:
                logger.warning(f"No posts extracted from page {page}.")

            time.sleep(random.uniform(3, 7))  # polite delay between requests
    finally:
        semaphore.release()

def main(stock_code: str, total_pages: int, pages_per_thread: int):
    """Main entry point for a full crawl: split the page range across worker threads."""
    output_file = f"{stock_code}_posts.json"
    threads = []
    num_threads = total_pages // pages_per_thread + (1 if total_pages % pages_per_thread != 0 else 0)

    for i in range(num_threads):
        start_page = i * pages_per_thread + 1
        end_page = min((i + 1) * pages_per_thread, total_pages)

        thread = Thread(target=process_pages,
                        args=(stock_code, start_page, end_page, output_file))
        threads.append(thread)
        thread.start()

    for t in threads:
        t.join()

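# Hypothetical usage of main() (the __main__ block below only re-crawls missing
# pages); pages_per_thread=20 is an illustrative value, not taken from the source:
#
#     main("300750", total_pages=3300, pages_per_thread=20)
#
# With BoundedSemaphore(5), at most five of the spawned threads fetch at once.
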
def process_missing_pages(stock_code: str, pages: list, output_file: str):
    """Crawl an explicit list of previously missed pages inside one worker thread."""
    semaphore.acquire()
    proxy = get_proxy()
    if not proxy:
        logger.error("Could not obtain a proxy IP; skipping this thread.")
        semaphore.release()
        return
    try:
        for page in pages:
            page_source = get_page(stock_code, page, proxy)
            if not page_source:
                logger.error(f"Could not fetch page source; skipping page {page}.")
                continue

            posts = parse_page(page_source, page, stock_code)
            if posts:
                logger.info(f"Extracted {len(posts)} posts from page {page}.")
                save_to_json(posts, output_file)
                update_progress("./log", page)
            else:
                logger.warning(f"No posts extracted from page {page}.")

            time.sleep(random.uniform(3, 7))  # polite delay between requests
    finally:
        semaphore.release()

def crawl_missing_pages(stock_code: str, missing_pages: list, batch_size: int = 20):
    """Main entry point for re-crawling: split missing pages into batches, one thread per batch."""
    output_file = f"{stock_code}_posts.json"
    threads = []
    for i in range(0, len(missing_pages), batch_size):
        batch = missing_pages[i:i + batch_size]
        thread = Thread(
            target=process_missing_pages,
            args=(stock_code, batch, output_file)
        )
        threads.append(thread)
        thread.start()
        time.sleep(1)  # stagger thread start-up

    for t in threads:
        t.join()


if __name__ == '__main__':
    stock_code = "300750"
    total_pages = 3300
    threads_count = 5
try: with open("./log", "r", encoding='utf-8') as f: codes = f.read().split('\n') except FileNotFoundError: codes = [] codes = list(set(codes)) codes = [int(i) for i in codes if i.isdigit()] codes.sort() missing_pages = [] for i in range(1, total_pages + 1): if i not in codes: print(f"缺失页码:{i}") missing_pages.append(i) print(f"共发现 {len(missing_pages)} 个缺失页面") if missing_pages: crawl_missing_pages(stock_code, missing_pages) print("缺失页面爬取完成!") else: print("没有缺失页面需要爬取。") print("没有缺失页面需要爬取。")