"""A small web crawler that writes all links it finds to a txt file."""


import requests
from bs4 import BeautifulSoup

def forum_spider(max_pages):
    """Crawl the forum category pages and collect absolute link URLs.

    Fetches pages 1..max_pages of the forum category listing, extracts the
    href of every anchor tag, and prefixes it with the site root.

    Args:
        max_pages: Highest page number to fetch (inclusive).

    Returns:
        A list of absolute URL strings, in document order, one entry per
        anchor tag found across all fetched pages.
    """
    links = []
    for page in range(1, max_pages + 1):
        url = f"https://buckysroom.org/forum/category.php?id=15&page={page}"
        # Timeout so a dead server can't hang the crawl indefinitely.
        source_code = requests.get(url, timeout=10)
        # Explicit parser: without it bs4 warns and behavior varies by
        # whichever parser happens to be installed.
        soup = BeautifulSoup(source_code.text, "html.parser")
        for link in soup.findAll('a'):
            href = link.get('href')
            # <a> tags without an href return None; the original code
            # would raise TypeError on the string concatenation below.
            if href is None:
                continue
            links.append("https://buckysroom.org" + href)
    return links

# Crawl the first 4 pages and dump every discovered link, one per line.
# (The original also called forum_spider(1) afterwards and discarded the
# result — a second full network crawl for nothing; removed.)
with open("test.txt", "w", encoding="utf-8") as fp:
    fp.write("\n".join(forum_spider(4)))