import os
import json
import urllib.request

import requests

keyword = input("Enter a keyword: ")
head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36"}
print("Starting up... 30 images per page")
page = int(input("How many pages to crawl: "))
url = 'https://image.baidu.com/search/acjson?'
p = 1  # pn parameter: index of the first image of the current page
k = 0  # running counter used to name the saved files

# Create the output directory for this keyword
try:
    os.mkdir('E:/素材/' + keyword)
except FileExistsError:
    print("Directory already exists")
for i in range(1, page + 1):  # one request per page of results
    param = {
        'tn': 'resultjson_com',
        'logid': '8846269338939606587',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'queryWord': keyword,
        'cl': '2',
        'lm': '-1',
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': '-1',
        'z': '',
        'ic': '',
        'hd': '',
        'latest': '',
        'copyright': '',
        'word': keyword,
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': '0',
        'istype': '2',
        'qc': '',
        'nc': '1',
        'fr': '',
        'expermode': '',
        'force': '',
        'cg': 'girl',
        'pn': p,  # pn: index of the first image on this page
        'rn': '30',
        'gsm': '1e',
    }
    p = p + 30  # rn is 30 per request, so the next page starts 30 results later

    r = requests.get(url, headers=head, params=param)
    r.encoding = 'utf-8'
    j = json.loads(r.text, strict=False)  # strict=False tolerates control characters in the response
    for img in j['data']:
        try:
            k = k + 1
            # Save each thumbnail under a sequential file name
            urllib.request.urlretrieve(img['thumbURL'], 'E:/素材/' + keyword + "/" + str(k) + '.jpg')
        except Exception:
            # Entries without a thumbURL or failed downloads are simply skipped
            pass
print("Done")