"""Fetch playlistItems (video) metadata for YouTube playlists via the Data API v3.

Reads a text file of playlistIds, writes one ``{id}_videos.json`` per playlist,
and optionally collects all extracted videoIds into a deduplicated text file.
"""
import sys
import os
from pathlib import Path
import logging
import json
import random
import time
import argparse
import requests

BASE_URL = 'https://www.googleapis.com/youtube/v3/'
API_KEY = ''  # place API key here
API_PART = ['snippet', 'contentDetails', 'status']
API_PARAMS = {'part': ','.join(API_PART), 'maxResults': 50}


## Creates error logger with handler
def make_error_logger(filename):
    """Return the module logger with an ERROR-level FileHandler on *filename*.

    Guarded against duplicate handlers: calling this twice with the same
    file reuses the existing handler instead of double-logging each record.
    """
    logger = logging.getLogger(__name__)
    target = os.path.abspath(filename)
    for h in logger.handlers:
        if isinstance(h, logging.FileHandler) and h.baseFilename == target:
            return logger
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.ERROR)
    formatter = logging.Formatter("%(asctime)s|%(levelname)s|%(message)s",
                                  "%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


## Pages through API request and returns list of all items
## Params must be in the Session object
def get_api_items(endpoint, session):
    """Collect every item from a paged API endpoint.

    Follows ``nextPageToken`` until exhausted.  On any 4xx response the
    error is printed and paging stops early (partial results are kept).

    Returns:
        dict with keys ``json`` (list of items), ``session`` (possibly a
        fresh Session after retries) and ``error`` (message string or None).
    """
    js_api = []
    session.params['key'] = API_KEY
    err_msg = None
    while True:
        # 4xx codes are "exit" codes too: they are terminal API answers
        # (bad request / quota / not found), not transient failures.
        exit_codes = [200, 400, 401, 403, 404, 409]
        ret = __get_response(BASE_URL + endpoint, session=session,
                             exit_codes=exit_codes, sleep=[1, 3])
        (response, session) = (ret['response'], ret['session'])
        js = json.loads(response.text)
        ## Skip if 4** status code
        if response.status_code // 100 == 4:
            errs = ','.join([x['reason'] for x in js['error']['errors']])
            err_msg = '{} {}'.format(js['error']['code'], errs)
            print(f'Error: {err_msg}')
            print('Skipping...')
            break
        ## Concatenate API
        js_api.extend(js['items'])
        ## Continue if nextPageToken
        next_page_token = js.get('nextPageToken')
        if next_page_token is None:
            break
        session.params['pageToken'] = next_page_token
    return {'json': js_api, 'session': session, 'error': err_msg}


## Input: list
## Output: unique list (ordered)
def unique_list(_list):
    """Return *_list* with duplicates removed, preserving first-seen order."""
    seen = set()
    result = []  # renamed: the original local shadowed this function's name
    for item in _list:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result


## Parse arguments
def __parse_args(argv):
    """Parse CLI arguments; returns {'input', 'api_dir', 'output'}.

    Raises ValueError if the input/output filenames are not .txt files.
    """
    parser = argparse.ArgumentParser(
        description='Write playlistItems (videos) metadata for playlists.',
        formatter_class=argparse.RawTextHelpFormatter)
    # Drop the default groups so "required" sorts above "optional" in --help.
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    required.add_argument(
        '-i', '--input', required=True,
        help='List of playlistIds \
(https://www.youtube.com/playlist?list={playlistId})',
        metavar='playlists.txt')
    optional.add_argument(
        '-d', '--api_dir', type=Path,
        default=Path(__file__).absolute().parent / 'playlists',
        help='Output directory for API files. Default is ./playlists.',
        metavar='./playlists')
    optional.add_argument(
        '-o', '--output', default=None,
        help='File to output all videoIds',
        metavar='videos.txt')
    args = parser.parse_args(argv)
    f_in = args.input
    api_dir = args.api_dir
    f_out = args.output
    if not f_in.endswith('.txt'):
        raise ValueError('Input filename not a .txt file.')
    if f_out is not None and not f_out.endswith('.txt'):
        raise ValueError('Output filename not a .txt file.')
    return {'input': f_in, 'api_dir': api_dir, 'output': f_out}


def __sleep(x, y=None):
    """Sleep a random duration drawn uniformly from [x, y] (y defaults to x)."""
    if y is None:
        y = x
    sleep_time = round(random.uniform(x, y), 3)
    print(f'Sleeping for {sleep_time} seconds...')
    time.sleep(sleep_time)
    return


def __get_response(url, session=None, exit_codes=None, sleep=None):
    """GET *url* through *session*, retrying until the status is in *exit_codes*.

    Network exceptions and unexpected status codes trigger a randomized
    sleep and a retry; on a bad status the Session is rebuilt (keeping its
    params) to drop any stale connection state.

    Returns {'response': Response, 'session': Session}.
    """
    if session is None:
        session = requests.Session()
    # Sentinel defaults: the original used mutable default arguments
    # (exit_codes=[200], sleep=[1,3]) shared across calls.
    if exit_codes is None:
        exit_codes = [200]
    if sleep is None:
        sleep = [1, 3]
    if not isinstance(sleep, list):
        sleep = [sleep]
    while True:
        # full_url is display-only; requests merges session.params itself.
        if session.params:
            full_url = url + '?' + '&'.join(
                '{}={}'.format(k, v) for k, v in session.params.items())
        else:
            full_url = url  # bug fix: was unbound when params were empty
        print(f'Retrieving {full_url}')
        try:
            response = session.get(url, timeout=10)
        except Exception as e:
            print(f'Exception: {e}')
            __sleep(*sleep)
            continue
        ## Check status code
        status_code = response.status_code
        print(f'Status: {status_code}')
        if status_code in exit_codes:
            break
        print('Retrying...')
        # Rebuild the session but carry the query params over.
        api_params = session.params
        session = requests.Session()
        session.params = api_params
        __sleep(*sleep)
    return {'response': response, 'session': session}


def main(args):
    """Entry point: fetch metadata for every playlistId in the input file."""
    ## Create error logger
    err_log = make_error_logger('error.log')
    ## Parse arguments
    args = __parse_args(args)
    f_in = args['input']
    api_dir = args['api_dir']
    f_out = args['output']
    ## Get Ids from input file
    with open(f_in, 'r', encoding='utf-8') as f:
        ids_in = [x.rstrip() for x in f]
    ## Create output directory
    api_dir.mkdir(parents=True, exist_ok=True)
    ## Iterate over Ids
    session = requests.Session()
    # Copy: the original aliased API_PARAMS, so per-playlist keys
    # ('key', 'playlistId', 'pageToken') leaked into the module constant.
    session.params = dict(API_PARAMS)
    total_ids = len(ids_in)
    ids_out = []
    for i, _id in enumerate(ids_in):
        print('\n########################\n')
        print(f'Processing Id {_id} ({i+1}/{total_ids})...')
        ## Get API info
        session.params['playlistId'] = _id
        session.params.pop('pageToken', None)  ## Reset pageToken
        ret = get_api_items('playlistItems', session)
        (js, session, err_msg) = (ret['json'], ret['session'], ret['error'])
        if err_msg:
            err_log.error(f'{_id}: {err_msg}')
            continue
        print('Found {} item(s).'.format(len(js)))
        if len(js) > 0:
            ## Write API info to file
            f_api = os.path.join(api_dir, f'{_id}_videos.json')
            print(f'Writing API info to {f_api}...')
            with open(f_api, 'w', encoding='utf-8') as f:
                json.dump(js, f, indent=4)
            print('Write successful.')
            ## Extract Ids and append to file (if enabled)
            if f_out:
                temp = []
                for item in js:
                    try:
                        temp.append(item['contentDetails']['videoId'])
                    except KeyError:
                        # Item without contentDetails/videoId (e.g. a
                        # deleted/private video) -- skip it. Was a bare
                        # except that hid every error.
                        pass
                print(f'Writing extracted Ids to {f_out}...')
                # Incremental append so progress survives a crash; the
                # final pass below rewrites the file deduplicated.
                with open(f_out, 'a+', encoding='utf-8') as f:
                    f.writelines(x + '\n' for x in temp)
                print('Write successful.')
                ids_out.extend(temp)
    ## Write unique Ids to file (if enabled)
    if f_out:
        print(f'\nDeduplicating and writing all extracted Ids to {f_out}...')
        with open(f_out, 'w', encoding='utf-8') as f:
            f.writelines(x + '\n' for x in unique_list(ids_out))
    print('Finished!')
    return


if __name__ == '__main__':
    main(sys.argv[1:])