import os

import pandas as pd
import requests

# Define the path to the CSV file
csv_file_path = ''

# Define the output directory to save the images
output_dir = ''
os.makedirs(output_dir, exist_ok=True)

# Read the CSV file
df = pd.read_csv(csv_file_path)

# Find the first column whose name contains 'URL'
url_column = None
for column in df.columns:
    if 'URL' in column:
        url_column = column
        break

if not url_column:
    raise ValueError("No 'URL' column found in the CSV file")

# Iterate over the URLs and download the images
for idx, row in df.iterrows():
    url = row[url_column]
    image_name = f'{10000 + idx}.jpg'  # Name the images sequentially, starting from 10000.jpg
    image_path = os.path.join(output_dir, image_name)

    try:
        response = requests.get(url, stream=True)
        if response.status_code == 200:
            # Stream the response body to disk in 1 KB chunks
            with open(image_path, 'wb') as file:
                for chunk in response.iter_content(1024):
                    file.write(chunk)
            print(f'Successfully downloaded {image_name}')
        else:
            print(f'Failed to download image at {url}')
    except Exception as e:
        print(f'An error occurred while downloading {url}: {e}')
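
# For reference, a sketch of the CSV layout this script expects: a single
# column whose header contains 'URL' (the header name below is hypothetical;
# any column containing 'URL' is matched), one image URL per row.
#
#   Image URL
#   https://example.com/images/photo1.jpg
#   https://example.com/images/photo2.jpg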