韩晓莉的作业三

代码


  """
  Created on Wed May 11 19:22:21 2022

  @author: 谢钦焱、韩晓莉、孙真真
  """

  from bs4 import BeautifulSoup
  import re
  import pandas as pd
  from selenium import webdriver
  from selenium.webdriver.common.by import By
  from selenium.webdriver.common.keys import Keys

  # --- Scrape the SZSE periodic-report disclosure page with Selenium ---
  browser = webdriver.Chrome()
  browser.get('http://www.szse.cn/disclosure/listed/fixed/index.html')

  # Type the company name into the search box and submit.
  elem = browser.find_element(By.ID, 'input_code')
  elem.send_keys('兴业矿业' + Keys.RETURN)

  # The category filter is a click-driven dropdown, so .click() is used.
  browser.find_element(By.LINK_TEXT, '请选择公告类别').click()
  browser.find_element(By.LINK_TEXT, '年度报告').click()

  # The date-range fields are typed, hence send_keys + Keys.RETURN.
  y_start = browser.find_element(By.CLASS_NAME, 'input-left')
  y_start.send_keys('2012' + Keys.RETURN)
  y_end = browser.find_element(By.CLASS_NAME, 'input-right')
  # BUG FIX: the end year was previously sent to y_start, leaving the
  # end-of-range field empty; it must go to y_end.
  y_end.send_keys('2021' + Keys.RETURN)

  # Grab the raw HTML of the result table for offline parsing.
  element = browser.find_element(By.ID, "disclosure-table")
  innerHTML = element.get_attribute("innerHTML")

  # Context manager guarantees the file is closed even on error.
  with open("HW3.html", 'w', encoding='utf-8') as f:
      f.write(innerHTML)
  browser.quit()


  class DisclosureTable():
      '''
      Parse the search-result table (innerHTML of the "disclosure-table"
      element) scraped from the SZSE periodic-report disclosure page.

      After construction, ``df_txt`` holds the raw per-cell HTML and
      ``get_data()`` returns a cleaned DataFrame with plain-text columns
      plus absolute attachment/detail-page URLs.
      '''
      def __init__(self, innerHTML):
          self.html = innerHTML
          # Attachments are served from disc.szse.cn; detail pages from www.szse.cn.
          self.prefix = 'https://disc.szse.cn/download'
          self.prefix_href = 'https://www.szse.cn/'
          # Pull the visible text out of <a> and <span> cells.
          # NOTE(review): the original literals had their HTML tags stripped
          # by extraction; restored to match how the groups are consumed.
          p_a = re.compile('<a.*?>(.*?)</a>', re.DOTALL)
          p_span = re.compile('<span.*?>(.*?)</span>', re.DOTALL)
          self.get_code = lambda txt: p_a.search(txt).group(1).strip()
          self.get_time = lambda txt: p_span.search(txt).group(1).strip()
          # Build the raw cell-HTML DataFrame eagerly.
          self.txt_to_df()

      def txt_to_df(self):
          '''Split the table HTML into rows/cells and store raw cell HTML in df_txt.'''
          html = self.html
          p = re.compile('<tr>(.*?)</tr>', re.DOTALL)
          trs = p.findall(html)

          # trs[0] is the header row; keep only data rows.
          p2 = re.compile('<td.*?>(.*?)</td>', re.DOTALL)
          tds = [p2.findall(tr) for tr in trs[1:]]

          df = pd.DataFrame({'证券代码': [td[0] for td in tds],
                             '简称': [td[1] for td in tds],
                             '公告标题': [td[2] for td in tds],
                             '公告时间': [td[3] for td in tds]})
          self.df_txt = df

      def get_link(self, txt):
          '''
          Extract [attachpath, href, title] from one announcement-title cell.
          The cell is an <a> tag carrying attachpath/href attributes and a
          <span> with the visible title.
          '''
          p_txt = '<a.*?attachpath="(.*?)".*?href="(.*?)".*?<span.*?>(.*?)</span>'
          p = re.compile(p_txt, re.DOTALL)
          matchObj = p.search(txt)
          attachpath = matchObj.group(1).strip()
          href       = matchObj.group(2).strip()
          title      = matchObj.group(3).strip()
          return([attachpath, href, title])

      def get_data(self):
          '''
          Return the cleaned DataFrame: code, short name, title, absolute
          attachment URL, absolute detail-page URL, announcement time.
          '''
          get_code = self.get_code
          get_time = self.get_time
          get_link = self.get_link
          #
          df = self.df_txt
          codes = [get_code(td) for td in df['证券代码']]
          short_names = [get_code(td) for td in df['简称']]
          ahts = [get_link(td) for td in df['公告标题']]
          times = [get_time(td) for td in df['公告时间']]
          #
          prefix = self.prefix
          # BUG FIX: was `self.prefix`, which prefixed detail-page links
          # with the attachment host; use the www.szse.cn prefix instead.
          prefix_href = self.prefix_href
          df = pd.DataFrame({'证券代码': codes,
                             '简称': short_names,
                             '公告标题': [aht[2] for aht in ahts],
                             'attachpath': [prefix + aht[0] for aht in ahts],
                             'href': [prefix_href + aht[1] for aht in ahts],
                             '公告时间': times
              })
          self.df_data = df
          return(df)

  # Re-read the saved table HTML; `with` closes the file even on error.
  with open('HW3.html', encoding='utf-8') as f:
      html = f.read()

  dt = DisclosureTable(html)  # parse the table with the class defined above
  df = dt.get_data()
  df.to_csv('data.csv')


结果

结果截图 结果截图 结果截图 结果截图 结果截图

解释

  1. 导入模块,运用selenium模块访问深交所官网,输入公司名称,选择公告类别,输入 时间范围,提取搜索结果代码写入html文件中。
  2. 用自定义函数解析深交所定期报告搜索表格。
  3. 将表格内容导入spyder形成数据框,并导出为一个csv文件。