#coding:utf-8
import os
from fake_useragent import UserAgent
import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve



download_path = './语文'
if not os.path.exists(download_path):
    os.makedirs(download_path)



def main():
    start_urls = ['https://book.yunzhan365.com/dwca/lhng/files/mobile/1.jpg']
    ua = UserAgent()
    headers = {'User-Agent':ua.chrome}
    r = requests.get(start_urls[0], headers=headers)  # requests.get expects a single URL string, not a list
#   soup = BeautifulSoup(r.text, 'lxml')
#   content = soup.find('div', class_='book')   # the outer div that wraps the whole book
#   images = content.find_all('img')            # find_all takes a tag name ('img'), not 'jpg'


    for i in range(2, 71):
        start_urls.append(f'https://book.yunzhan365.com/dwca/lhng/files/mobile/{i}.jpg')
    for i, url in enumerate(start_urls, start=1):
        # urlretrieve takes one URL and a full file path; the original call passed the
        # whole list, a bare directory, and an undefined callback `cbk`
        urlretrieve(url, f'{download_path}/{i}.jpg')
    print('Download complete')



if __name__ == '__main__':
    main()

The program above didn't run as written; saving the code here so I can keep working on it later from another computer!
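For reference, the commented-out lines hint at the plan I had in mind: scrape the flipbook viewer page and collect the image URLs instead of guessing their numbers. Below is a minimal sketch of that idea; it assumes (unverified) that the viewer wraps its pages in a div.book container and references them with plain <img> tags, and the index.html viewer URL is also a guess.

# Sketch only: the viewer URL and the div.book / <img> page structure are
# assumptions, not verified against the live site.
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

ua = UserAgent()
headers = {'User-Agent': ua.chrome}
viewer_url = 'https://book.yunzhan365.com/dwca/lhng/index.html'  # hypothetical viewer page

r = requests.get(viewer_url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')

book = soup.find('div', class_='book')        # the container the old comment targeted
imgs = book.find_all('img') if book else []   # find_all needs a tag name, e.g. 'img'
jpg_urls = [img['src'] for img in imgs if img.get('src', '').endswith('.jpg')]
print(jpg_urls)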

Below is the code after I got it working; it turned out to be far less complicated. Updated March 13, 2020, 1:23 a.m.
Note: this example is for the Grade 1, second-semester Chinese (语文) practice workbook; for other courseware or exercise sets, adapt the code yourself.

#coding:gbk
import requests
import os
download_path = './语文'
if not os.path.exists(download_path):
    os.makedirs(download_path)

for i in range(1, 71):
    url = f'https://book.yunzhan365.com/dwca/lhng/files/mobile/{i}.jpg'
    r = requests.get(url)  # download the image
    print(r.url,r.status_code)
    with open(f'{download_path}/{i}.jpg', 'wb') as f:  # open the target file under download_path in binary write mode
        f.write(r.content)  # write the response body (the raw image bytes) to the file
print('All images downloaded')
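Since the note above says to adapt the code for other courseware, here is a hedged sketch of how the loop could be parameterized: wrap it in a function that takes the book's path segment, its page count, and an output folder. The 'xxxx/yyyy' path, the 100-page count, and the folder name are placeholders, not values from a real book.

# Placeholder values only: book_path, pages and out_dir must be replaced with the
# real path segment and page count of the book you want.
import os
import requests

def download_book(book_path, pages, out_dir):
    """Download pages 1..pages of a yunzhan365 flipbook into out_dir."""
    os.makedirs(out_dir, exist_ok=True)
    for i in range(1, pages + 1):
        url = f'https://book.yunzhan365.com/{book_path}/files/mobile/{i}.jpg'
        r = requests.get(url)
        print(url, r.status_code)
        with open(f'{out_dir}/{i}.jpg', 'wb') as f:
            f.write(r.content)
    print('All images downloaded')

# Example call (placeholders): download_book('xxxx/yyyy', 100, './another_book')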
