import scraperwiki
import lxml.html
import uuid
import datetime

# ASINs of the Amazon products to monitor.
ASINS = ["B00C6Q1Z6E", "B00CQHZ2LW", "B00DF0ZP8Y", "B00DSDYE3A", "B00C6Q9688"]

summary = ""
for asin in ASINS:
    url = "http://www.amazon.com/dp/" + asin
    html = scraperwiki.scrape(url)
    root = lxml.html.fromstring(html)

    # Product title.
    for title in root.cssselect("span[id='btAsinTitle']"):
        summary += title.text + ": "
        break

    # Current price.
    for price in root.cssselect("span[id='actualPriceValue'] b"):
        summary += price.text + "\n"
        break

    summary += url + "\n"

now = datetime.datetime.now()
data = {
    'link': "http://www.amazon.com/" + "&uuid=" + str(uuid.uuid1()),
    'title': "Price Monitoring " + str(now),
    'description': summary,
    'pubDate': str(now),
}
scraperwiki.sqlite.save(unique_keys=['link'], data=data)
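Each run saves one row whose fields mirror an RSS item (link, title, description, pubDate), with the uuid in the link making every run's row unique. As a side note, the extraction step can be exercised on its own, without the scraperwiki library or a live request. The sketch below is only an illustration under assumptions: "page.html" is a hypothetical locally saved copy of a product page, and the btAsinTitle/actualPriceValue selectors are taken unchanged from the scraper above (Amazon's markup may have changed since).

# Minimal, self-contained sketch of the same extraction logic against a saved page.
# "page.html" is a hypothetical filename used only for illustration.
import lxml.html

def extract_title_and_price(html):
    """Return (title, price) using the scraper's selectors, or None where absent."""
    root = lxml.html.fromstring(html)
    titles = root.cssselect("span[id='btAsinTitle']")
    prices = root.cssselect("span[id='actualPriceValue'] b")
    title = titles[0].text if titles else None
    price = prices[0].text if prices else None
    return title, price

if __name__ == "__main__":
    with open("page.html") as f:
        print(extract_title_and_price(f.read()))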
Average successful run time: half a minute
Total run time: about 21 hours
Total cpu time used: 16 minutes
Total disk space used: 19.8 KB