import asyncio
import random
import threading
import time

import aiohttp
import schedule

from config import App


async def fetch_endpoint(session, url):
    """Fetch *url* once and return the response body as text (no retries)."""
    async with session.get(url) as response:
        return await response.text()


async def fetch_endpoint_with_retry(session, url, max_retries=5):
    """Fetch *url*, retrying on HTTP 500 with a randomized 30-90s delay.

    Args:
        session: an open ``aiohttp.ClientSession``.
        url: the endpoint to GET.
        max_retries: how many additional attempts to make after a 500
            before giving up (the original code retried forever).

    Returns:
        The response body as text on HTTP 200, otherwise ``None``
        (unexpected status, or retries exhausted).
    """
    # NOTE(review): total=None disables the client timeout entirely — a
    # hung server stalls this task forever. Confirm this is intended.
    timeout = aiohttp.ClientTimeout(total=None)
    for attempt in range(max_retries + 1):
        async with session.get(url, timeout=timeout) as response:
            if response.status == 200:
                return await response.text()
            if response.status == 500:
                # Bounded retry loop replaces the original unbounded
                # recursion, which could wait/recurse indefinitely.
                if attempt == max_retries:
                    print(f"Giving up on {url} after {max_retries} retries.")
                    return None
                retry_delay = random.randint(30, 90)
                print(f"Received a 500 response from {url}. "
                      f"Retrying in {retry_delay} seconds.")
                await asyncio.sleep(retry_delay)
            else:
                print(f"Received an unexpected response code "
                      f"{response.status} from {url}.")
                return None
    return None


async def main():
    """Fetch all parser endpoints concurrently; on full success, hit the downloader."""
    print("Script is running...")
    async with aiohttp.ClientSession() as session:
        # App.ParserEndpointList is a whitespace-separated list of URLs.
        endpoints = App.ParserEndpointList.split()
        # Create tasks for fetching all endpoints concurrently.
        tasks = [fetch_endpoint_with_retry(session, url) for url in endpoints]
        endpoint_responses = await asyncio.gather(*tasks)
        print(endpoint_responses)
        # Only trigger the downloader when every endpoint succeeded
        # (fetch_endpoint_with_retry returns None on any failure).
        if all(response is not None for response in endpoint_responses):
            another_endpoint = App.DownloaderLink
            response = await fetch_endpoint(session, another_endpoint)
            print(response)
        else:
            # Single-line literal: the original source had a raw newline
            # inside this string, which is invalid Python.
            print("Not all endpoints returned 200 status codes. "
                  "Skipping another_endpoint.")


def run_scheduler():
    """Run pending `schedule` jobs forever, polling once per second."""
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    # Schedule the script to run every day at the configured time and
    # drive the scheduler loop from a background thread.
    print(f'Repos downloading is scheduled on {App.ScheduleTime}')
    schedule.every().day.at(App.ScheduleTime).do(lambda: asyncio.run(main()))
    scheduler_thread = threading.Thread(target=run_scheduler)
    scheduler_thread.start()