Following Bucky's web scraping tutorial I coded this scraper using Python 2.7 to fetch links from the first 3 pages of TrueLocal.com.au and write them to a text file.

When I run the program, only the first link is written in the text file. What can I do so that all the URLs returned are written on the file?



import requests
from bs4 import BeautifulSoup

def tru_crawler(max_pages):
page = 1
while page <= max_pages:
url = 'http://www.truelocal.com.au/find/car-rental/' + str(page)
code = requests.get(url)
text = code.text
soup = BeautifulSoup(text)
for link in soup.findAll('a', {'class':'name'}):
href = 'http://www.truelocal.com.au' + link.get('href')
fob = open('c:/test/true.txt', 'w')
fob.write(href + '\n')
fob.close()
print (href)
page += 1


tru_crawler(3)