import boto3
import csv
from io import StringIO
import time

def search_all_resources():
    client = boto3.client('resource-explorer-2')
    all_resources = []
    # Initialize with a dummy NextToken to start the first page
    next_token = None
    retries = 0
    while True:
        try:
            if next_token:
                # Fetch resources for the current page
                response = client.search(
                    MaxResults=123,
                    NextToken=next_token,
                    QueryString=' ',
                    ViewArn='arn'
                )
                if 'Resources' in response:
                    all_resources.extend(response['Resources'])
                print('in next token loop')
                # Check if there are more pages
                next_token = response.get('NextToken')
            else:
                response = client.search(
                    MaxResults=123,
                    QueryString=' ',
                    ViewArn='arn'
                )
                if 'Resources' in response:
                    all_resources.extend(response['Resources'])
            # If no exception occurred, reset retries
            retries = 0
            break
        except client.exceptions.ThrottlingException as e:
            if retries >= 5:  # Max retries
                print("Max retries exceeded. Exiting.")
                raise e
            else:
                retries += 1
                delay = 2 ** retries
                print(f"ThrottlingException. Retrying in {delay} seconds.")
                time.sleep(delay)
    return all_resources

def lambda_handler(event, context):
    # Search for all resources and fetch paginated results
    all_resources = search_all_resources()
    # Print and write to CSV
    if all_resources:
        csv_buffer = StringIO()
        csv_writer = csv.DictWriter(csv_buffer, fieldnames=all_resources[0].keys())
        csv_writer.writeheader()
        csv_writer.writerows(all_resources)
        csv_data = csv_buffer.getvalue()
        # Specify your S3 bucket and key
        s3_bucket = 'test-bucket'
        s3_key = 'path/to/aws_resources.csv'
        # Upload CSV data to S3
        s3 = boto3.client('s3')
        s3.put_object(Body=csv_data, Bucket=s3_bucket, Key=s3_key)
        print(f"Successfully uploaded CSV to S3: s3://{s3_bucket}/{s3_key}")
    else:
        print("No resources found.")
We tried to get an inventory, i.e. a list of all resources visible in the AWS console, but we were only able to get 123 of them.
Using the above code we get a ThrottlingException while fetching the resources, and it also runs only once. We are unable to fetch more than 123 resources. Our aim is to fetch every resource available in Resource Explorer, whether that is 500 or 1000, and store them in a CSV file in an S3 bucket.
2 Answers
In your code you have MaxResults=123, which is working as intended; I presume that is why you are getting 123 results. But you also have a break at the same level as the if statement, which means your while loop will run only once. That is why you end up with exactly 123.
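One way to apply this fix, sketched against the question's existing loop (everything else unchanged), is to make the break conditional on the pagination token instead of unconditional:

# Inside the try block, after extending all_resources:
next_token = response.get('NextToken')
if not next_token:
    break  # exit only when Resource Explorer reports no further pages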
There are a couple of problems here:
1. As mentioned in the other answer, you have an unconditional break which will stop your loop after the first iteration.
2. You do next_token = response.get('NextToken') only in the branch where next_token already exists, meaning you will not populate next_token on your first pass.
I would change the structure so that you fetch the first page, and then keep looping while NextToken exists. Something like this:
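A minimal sketch of that restructuring, reusing the question's placeholder parameters (MaxResults=123, the single-space QueryString, and the dummy ViewArn, all of which need real values) and keeping the same throttling backoff:

import time
import boto3

def search_all_resources():
    # Placeholder parameters copied from the question; replace 'arn'
    # with a real Resource Explorer view ARN and adjust QueryString.
    params = {
        'MaxResults': 123,
        'QueryString': ' ',
        'ViewArn': 'arn',
    }

    client = boto3.client('resource-explorer-2')
    all_resources = []

    def search_with_retry(**kwargs):
        # Retry the Search call with exponential backoff on throttling.
        retries = 0
        while True:
            try:
                return client.search(**kwargs)
            except client.exceptions.ThrottlingException:
                if retries >= 5:
                    print("Max retries exceeded. Exiting.")
                    raise
                retries += 1
                delay = 2 ** retries
                print(f"ThrottlingException. Retrying in {delay} seconds.")
                time.sleep(delay)

    # Fetch the first page, then keep paging while NextToken is present.
    response = search_with_retry(**params)
    all_resources.extend(response.get('Resources', []))
    while response.get('NextToken'):
        response = search_with_retry(NextToken=response['NextToken'], **params)
        all_resources.extend(response.get('Resources', []))

    return all_resources

The rest of your lambda_handler (writing the CSV and uploading it to S3) can stay as it is; only the pagination and retry logic needs to change.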