@fullsushidev
Created November 15, 2018 06:11
A simple Python script to manipulate data from SWAPI - The Star Wars API
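It depends only on the `requests` package (`pip install requests`). The script walks SWAPI's paginated `people` endpoint and prints an enumerated list of every character name, roughly like this (hypothetical output; the exact count and names depend on the live API):

Number of characters collected:  87
1 Luke Skywalker
2 C-3PO
...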
import requests
PEOPLE_ENDPOINT = 'https://swapi.co/api/people'


def print_results(collected_names):
    """
    Print the total number of names collected, then each name.
    - @param: collected_names - list of all names collected
    """
    print('Number of characters collected: ', len(collected_names))
    for idx, name in enumerate(collected_names, 1):
        print(idx, name)


def retrieve_next_page_url(page):
    """
    Return the value of the `next` field in the page response.
    This value is either a valid URL or None.
    """
    return page['next']


def extract_people_from_single_page(page):
    """
    Extract the name of each character listed in the `page` dict.
    - @param: page - dict with the data from a page
    """
    return [person['name'] for person in page['results']]


def get_page(page_url=None):
    """
    Given a page URL (`page_url`), retrieve a raw response from the API.
    If the response status code is 200, return the JSON data from the API;
    otherwise, return None.
    - @param: `page_url` - the URL of the page to retrieve; if None,
      points to the first page, a.k.a. https://swapi.co/api/people/
    """
    if page_url is None:
        # if not defined, retrieve the first page
        page_url = PEOPLE_ENDPOINT
    response = requests.get(page_url)
    return response.json() if response.status_code == 200 else None
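
# A quick sanity check of `get_page` in an interactive session (hypothetical
# output; the exact values depend on the live API):
#
#     >>> page = get_page()
#     >>> page['next']
#     'https://swapi.co/api/people/?page=2'
#     >>> [p['name'] for p in page['results']][:2]
#     ['Luke Skywalker', 'C-3PO']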


def main():
    people_names_collected = []
    # collect the first page:
    first_page = get_page()
    if first_page is None:
        print('Invalid response while requesting page: ', PEOPLE_ENDPOINT)
        return
    people_names_from_page = extract_people_from_single_page(first_page)
    people_names_collected.extend(people_names_from_page)
    target_page_url = retrieve_next_page_url(first_page)
    while True:
        page = get_page(target_page_url)
        if page is None:
            print('Invalid response while requesting page: ',
                  target_page_url)
            break  # stop instead of retrying the same URL forever
        elif page['next']:
            target_page_url = retrieve_next_page_url(page)
            people_names_from_page = extract_people_from_single_page(page)
            people_names_collected.extend(people_names_from_page)
        else:
            # collect data from the last page
            people_names_from_page = extract_people_from_single_page(page)
            people_names_collected.extend(people_names_from_page)
            break
    print_results(people_names_collected)


if __name__ == '__main__':
    main()
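
The page-walking loop above can also be written as a generator, which removes the duplicated extract/extend branches. A minimal sketch under the same assumptions (the `requests` package and the swapi.co endpoint; `iter_people_pages` and `collect_all_names` are illustrative names, not part of the original gist):

import requests

PEOPLE_ENDPOINT = 'https://swapi.co/api/people'


def iter_people_pages(start_url=PEOPLE_ENDPOINT):
    """Yield each page of the paginated `people` endpoint as a dict."""
    url = start_url
    while url:
        response = requests.get(url)
        if response.status_code != 200:
            break  # give up on a bad response, mirroring get_page()
        page = response.json()
        yield page
        url = page['next']  # None on the last page, which ends the loop


def collect_all_names():
    """Flatten every page's results into a single list of names."""
    return [person['name']
            for page in iter_people_pages()
            for person in page['results']]

Because the generator stops itself when `next` is None, the caller no longer needs a separate branch for the last page.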