"""Fetch playlist metadata for YouTube channels via the YouTube Data API v3.

Reads a list of channelIds from a text file, writes each channel's playlist
metadata to a per-channel JSON file, and optionally appends the extracted
playlistIds to an output text file (deduplicated at the end of the run).
"""

import argparse
import json
import logging
import os
import random
import sys
import time
from pathlib import Path

import requests

# NOTE(review): key was hard-coded in source; it can now be overridden via the
# environment. The fallback preserves the original behavior — rotate this key.
API_KEY = os.environ.get(
    'YOUTUBE_API_KEY', 'AIzaSyA-dlBUjVQeuc4a6ZN4RkNUYDFddrVLxrA')
BASE_URL = 'https://www.googleapis.com/youtube/v3/'
API_PART = ['snippet', 'status', 'contentDetails', 'player',
            'localizations', 'id']
API_PARAMS = {'part': ','.join(API_PART), 'maxResults': 50}


# Input: list
# Output: unique list (ordered)
def unique_list(_list):
    """Return a new list with duplicates removed, preserving first-seen order."""
    # dicts preserve insertion order (3.7+), so fromkeys is an ordered dedupe.
    return list(dict.fromkeys(_list))


# Creates error logger with handler
def make_error_logger(filename):
    """Return the module logger configured to write ERROR records to *filename*.

    Guards against attaching a duplicate FileHandler when called more than
    once, which would otherwise emit each record multiple times.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.ERROR)
    abs_path = os.path.abspath(filename)
    already_attached = any(
        isinstance(h, logging.FileHandler)
        and getattr(h, 'baseFilename', None) == abs_path
        for h in logger.handlers)
    if not already_attached:
        handler = logging.FileHandler(filename)
        handler.setLevel(logging.ERROR)
        formatter = logging.Formatter(
            "%(asctime)s|%(levelname)s|%(message)s", "%Y-%m-%d %H:%M:%S")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger


# Pages through API request and returns list of all items
# Params must be in the Session object
def get_api_items(endpoint, session):
    """Page through *endpoint* and collect every item across all pages.

    Request parameters must already be set on *session*. Returns a dict with
    keys 'json' (list of items), 'session' (the possibly-replaced Session),
    and 'error' (None, or an error message if a 4xx response was received).
    """
    js_api = []
    session.params['key'] = API_KEY
    err_msg = None
    while True:
        # 4xx codes are accepted (not retried) so quota/permission errors
        # can be logged and skipped instead of looping forever.
        exit_codes = [200, 400, 401, 403, 404, 409]
        ret = __get_response(
            BASE_URL + endpoint,
            session=session, exit_codes=exit_codes, sleep=[1, 3])
        (response, session) = (ret['response'], ret['session'])
        js = response.json()

        # Report and skip this request on any 4xx status code
        if response.status_code // 100 == 4:
            errs = ','.join([x['reason'] for x in js['error']['errors']])
            err_msg = '{} {}'.format(js['error']['code'], errs)
            print(f'Error: {err_msg}')
            print('Skipping...')
            break

        # Concatenate API items from this page
        js_api.extend(js['items'])

        # Follow pagination until the API stops returning a nextPageToken
        next_page_token = js.get('nextPageToken')
        if next_page_token is None:
            break
        session.params['pageToken'] = next_page_token
    return {'json': js_api, 'session': session, 'error': err_msg}


def __get_response(url, session=None, exit_codes=None, sleep=None):
    """GET *url*, retrying until the status code is in *exit_codes*.

    On a request exception or an unexpected status code, sleeps a random
    interval drawn from *sleep* (a [min, max] pair, or a scalar) and retries
    with a fresh Session carrying the same params.

    Returns {'response': Response, 'session': Session}.
    """
    if session is None:
        session = requests.Session()
    if exit_codes is None:
        exit_codes = [200]
    if sleep is None:
        sleep = [1, 3]
    elif not isinstance(sleep, list):
        sleep = [sleep]
    response = None
    while True:
        # Display-only URL; the params are actually sent by the Session.
        full_url = url
        if session.params:
            full_url += '?' + '&'.join(
                '{}={}'.format(k, v) for k, v in session.params.items())
        print(f'Retrieving {full_url}')
        try:
            response = session.get(url, timeout=10)
        except Exception as e:
            print(f'Exception: {e}')
            __sleep(*sleep)
            continue

        # Check status code
        status_code = response.status_code
        print(f'Status: {status_code}')
        if status_code in exit_codes:
            break
        print('Retrying...')
        # Rebuild the Session (fresh connection pool) but keep its params.
        api_params = session.params
        session = requests.Session()
        session.params = api_params
        __sleep(*sleep)
    return {'response': response, 'session': session}


def __sleep(x, y=None):
    """Sleep for a random duration in [x, y] seconds ([x, x] if y is None)."""
    if y is None:
        y = x
    sleep_time = round(random.uniform(x, y), 3)
    print(f'Sleeping for {sleep_time} seconds...')
    time.sleep(sleep_time)


# Parse arguments
def __parse_args(argv):
    """Parse CLI arguments; return {'input', 'api_dir', 'output'}.

    Raises ValueError when the input or output filename is not a .txt file.
    """
    parser = argparse.ArgumentParser(
        description='Write playlist metadata for channels.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    required.add_argument(
        '-i', '--input', required=True,
        help='List of channelIds '
             '(https://www.youtube.com/channel/{channelId})',
        metavar='channels.txt')
    optional.add_argument(
        '-d', '--api_dir', type=Path,
        default=Path(__file__).absolute().parent / 'channels',
        help='Output directory for API files.\nDefault is ./channels.',
        metavar='./channels')
    optional.add_argument(
        '-o', '--output', default=None,
        help='File to output all playlistIds',
        metavar='playlists.txt')
    args = parser.parse_args(argv)

    f_in = args.input
    api_dir = args.api_dir
    f_out = args.output
    if not f_in.endswith('.txt'):
        raise ValueError('Input filename not a .txt file.')
    if f_out is not None and not f_out.endswith('.txt'):
        raise ValueError('Output filename not a .txt file.')
    return {'input': f_in, 'api_dir': api_dir, 'output': f_out}


def main(args):
    """Entry point: fetch and store playlist metadata for each channelId."""
    # Parse arguments
    args = __parse_args(args)
    f_in = args['input']
    api_dir = args['api_dir']
    f_out = args['output']

    # Get Ids from input file
    with open(f_in, 'r', encoding='utf-8') as f:
        ids_in = [x.rstrip() for x in f]

    # Create output directory (exist_ok avoids a check-then-create race)
    os.makedirs(api_dir, exist_ok=True)

    # Create error logger
    err_log = make_error_logger('error.log')

    # Iterate over Ids. Copy API_PARAMS so per-request mutations
    # ('key', 'channelId', 'pageToken') don't pollute the module constant.
    session = requests.Session()
    session.params = dict(API_PARAMS)
    total_ids = len(ids_in)
    for i, _id in enumerate(ids_in):
        print('\n########################\n')
        print(f'Processing Id {_id} ({i + 1}/{total_ids})...')

        # Get API info
        session.params['channelId'] = _id
        session.params.pop('pageToken', None)  # Reset pageToken
        ret = get_api_items('playlists', session)
        (js, session, err_msg) = (ret['json'], ret['session'], ret['error'])
        if err_msg:
            err_log.error(f'{_id}: {err_msg}')
            continue
        print('Found {} item(s).'.format(len(js)))

        if len(js) > 0:
            # Write API info to file
            f_api = os.path.join(api_dir, f'{_id}_playlists.json')
            print(f'Writing API info to {f_api}...')
            with open(f_api, 'w', encoding='utf-8') as f:
                json.dump(js, f, indent=4)
            print('Write successful.')

            # Extract Ids and append to file (if enabled)
            if f_out:
                print(f'Writing extracted Ids to {f_out}...')
                with open(f_out, 'a+', encoding='utf-8', newline='\n') as f:
                    f.writelines(
                        item['id'] + '\n' for item in js if item.get('id'))
                print('Write successful.')

    # Deduplicate Ids in file (if enabled)
    if f_out:
        print(f'\nDe-duplicating all extracted Ids in {f_out}...')
        with open(f_out, 'r+', encoding='utf-8', newline='\n') as f:
            ids_out = unique_list([x.rstrip() for x in f])
            f.seek(0)
            f.writelines(x + '\n' for x in ids_out)
            f.truncate()

    print('Finished!')
    return


if __name__ == '__main__':
    main(sys.argv[1:])