There is no shortage of articles on feed-ad title optimization full of theories, strategies, and tricks. They all sound reasonable at first read, but they are hard to put into practice. In real work, optimizing feed titles mostly comes down to finding the right creative angle and then adapting it to your own situation.

The most direct way to do that is to have a large pool of competitor titles for reference: batch-fetch feed titles, then put them to use.

Batch-fetching Baidu feed titles

import requests
import json

# Baidu feed title recommendation API endpoint
url = "https://api.baidu.com/json/feed/v1/TitleRecommendService/getTitle"

user_payload = {
    "header": {
        # fill in your own API credentials
        "username": "xxxxxxxxxxxx",
        "password": "xxxxxxxxxxxxxx",
        "token": "xxxxxxxxxxxx",
        "action": "API-PYTHON"
    },
    "body": {
        "pageNo": 1,               # page number
        "pageSize": 100,           # titles per page
        "query": "query",          # keyword to get title recommendations for
        "sortField": "related",
        "sortOrder": "desc"
    }
}

http_headers = {
    "Accept-Encoding": "gzip, deflate",
    "Content-Type": "application/json",
    "Accept": "application/json"
}

# POST the JSON payload and print every recommended title
response = requests.post(url, data=json.dumps(user_payload), headers=http_headers).json()
listData = response['body']['data'][0]['listData']
for t in listData:
    print(t['title'])
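
The request above only pulls a single page. If the service pages the way its pageNo/pageSize fields suggest, you can loop over pages and save everything to a file. The sketch below is my own addition under that assumption; collect_baidu_titles, credentials, max_pages, and baidu_titles.txt are illustrative names, not part of Baidu's API.

import requests
import json

API_URL = "https://api.baidu.com/json/feed/v1/TitleRecommendService/getTitle"

def collect_baidu_titles(keyword, credentials, max_pages=5):
    """Page through the title recommendation service and return de-duplicated titles."""
    titles = []
    for page in range(1, max_pages + 1):
        payload = {
            # credentials is assumed to hold the username / password / token shown above
            "header": dict(credentials, action="API-PYTHON"),
            "body": {
                "pageNo": page,
                "pageSize": 100,
                "query": keyword,
                "sortField": "related",
                "sortOrder": "desc"
            }
        }
        resp = requests.post(API_URL, data=json.dumps(payload),
                             headers={"Content-Type": "application/json"}).json()
        page_titles = [t['title'] for t in resp['body']['data'][0]['listData']]
        if not page_titles:          # stop once a page comes back empty
            break
        titles.extend(page_titles)
    titles = list(dict.fromkeys(titles))   # de-duplicate while keeping order
    with open('baidu_titles.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(titles))
    return titles

# Usage: collect_baidu_titles("query", {"username": "...", "password": "...", "token": "..."})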

Batch-fetching Toutiao (Oceanengine) feed titles

import requests

# Headers copied from a logged-in Oceanengine session; replace x-csrftoken and
# cookie with the values from your own browser session
headers = {
    'authority': 'ad.oceanengine.com',
    'accept': 'application/json, text/javascript, */*; q=0.01',
    'x-requested-with': 'XMLHttpRequest',
    'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36 Edg/85.0.564.51',
    'x-csrftoken': 'vuig8f49p2jCXxYIHC4OUjk3FaP6ZYym',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'https://ad.oceanengine.com/pages/creative/create.html',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'cookie': 'cookies',
}

params = {
    'industry_first': '0',
    'industry_second': '0',
    'keywords': 'query',           # keyword to get title recommendations for
    'origin': 'title_recommend',
    'num': '15',                   # titles per page
    'page': '1',
}

response = requests.get('https://ad.oceanengine.com/overture/api/procedural/title_recommend/', headers=headers, params=params)
data = response.json()['data']['title_list']

for title in data:
    xxltitle = title['title'].strip()
    print(xxltitle)
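
One page for one keyword is rarely enough, so a natural extension is to loop the same request over several keywords and pages and de-duplicate before using the results. This is a minimal sketch of that pattern, not part of the Oceanengine interface: fetch_oceanengine_titles, keywords, and pages are my own illustrative names, and the headers (cookie, x-csrftoken) still have to come from your own logged-in session as in the script above.

import time
import requests

def fetch_oceanengine_titles(keywords, headers, pages=3):
    """Collect recommended titles for several keywords, de-duplicated, keeping order."""
    seen = []
    for kw in keywords:
        for page in range(1, pages + 1):
            params = {
                'industry_first': '0',
                'industry_second': '0',
                'keywords': kw,
                'origin': 'title_recommend',
                'num': '15',
                'page': str(page),
            }
            resp = requests.get(
                'https://ad.oceanengine.com/overture/api/procedural/title_recommend/',
                headers=headers, params=params)
            for item in resp.json()['data']['title_list']:
                title = item['title'].strip()
                if title not in seen:
                    seen.append(title)
            time.sleep(1)   # pause between requests to stay polite
    return seen

# Usage: print('\n'.join(fetch_oceanengine_titles(['query'], headers)))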

Batch-fetching adbug feed titles

#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File    :   ad.py
@Time    :   2020/07/22 22:38:55
@Author  :   开水
@Contact :   admin@hekaiyu.cn
@Department   :  Sun Yat-Sen University
@Desc    :   adbug scraper
'''
import os
import requests


# Search endpoint: https://testapi.adbug.cn/g/api/search
# payload: {"page":1,"size":50,"accurate":2,"sem_type":"no","filter_wechat_meta":"yes","wd":word,"token":token}


def postjson(word, token):
    """POST a keyword search to the adbug API and return the parsed JSON."""
    url = 'https://testapi.adbug.cn/g/api/search'
    # type 3 = video creatives
    parameters = {"accurate": "2", "type": "3", "page": 1, "size": 50, "isTips": "", "sem_type": "no", "shopsem_type": "no", "filter_wechat_meta": "yes", "wd": word, "token": token}
    r = requests.post(url, data=parameters)
    return r.json()
    
def downimg(path, am_source_url):
    """Download a creative asset (image or video) into images/<path>."""
    os.makedirs("images/" + path, exist_ok=True)
    # videos and images live under different CDN prefixes
    if ".mp4" in am_source_url:
        img_url = 'https://file.adbug.cn/m/flv/' + am_source_url
    else:
        img_url = 'https://file.adbug.cn/m/image/' + am_source_url
    header = {
        'Referer': 'https://www.adbug.cn/adSearch?keyword=',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
    }
    resp = requests.get(img_url, headers=header)
    with open("images/" + path + os.path.basename(am_source_url), 'wb') as f:
        f.write(resp.content)
    print(os.path.basename(am_source_url) + " saved")

def downtext(title_list, name):
    """Dump a list of titles to <name>.txt."""
    with open(name + '.txt', 'w', encoding='utf-8') as file:
        file.write(str(title_list))
    
def getdata(words, token):
    titles_list = []    # every title seen so far, used for de-duplication
    title_list = []     # short titles (under 15 characters)
    title_c_list = []   # medium titles (15-24 characters)
    desc_list = []      # longer titles, usable as descriptions
    img_list = []
    for word in words:
        return_json = postjson(word, token)
        msg = return_json["msg"]
        print(msg)
        if msg != "获取成功":   # "获取成功" = the request succeeded
            continue
        for data in return_json['data']:
            title = data['title']
            print(title)
            am_source_url = data['am_source_url']
            width = data['width']
            height = data['height']
            if title not in titles_list:
                titles_list.append(title)
                # bucket titles by length
                if len(title) < 15:
                    title_list.append(title)
                elif len(title) < 25:
                    title_c_list.append(title)
                else:
                    desc_list.append(title)
            # sort creatives into folders by resolution
            if width == 480 and height == 320:
                img_list.append(am_source_url)
                downimg('hx/', am_source_url)
            elif width == 1280 and height == 720:
                img_list.append(am_source_url)
                downimg('hd/', am_source_url)
            else:
                downimg('', am_source_url)
    downtext(title_list, 'title_list')
    downtext(title_c_list, 'title_c_list')
    downtext(desc_list, 'desc_list')
    
words = ['query']   # keywords to search for
token = "token"     # your adbug API token

getdata(words,token)
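
In practice the keyword list usually lives in a file rather than being hard-coded. The small wrapper below is my own addition (run_from_file and keywords.txt are illustrative names): it reads one keyword per line and hands them all to getdata in a single call. Passing every keyword at once matters because downtext opens its output files in 'w' mode, so splitting the keywords across several getdata calls would overwrite the title lists each time.

def run_from_file(path, token):
    """Read one keyword per line from a text file and pass them all to getdata()."""
    with open(path, encoding='utf-8') as f:
        keywords = [line.strip() for line in f if line.strip()]
    getdata(keywords, token)

# Usage: run_from_file('keywords.txt', 'your-adbug-token')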