import requests
import re
try:
    from urllib.parse import urljoin   # Python 3
except ImportError:
    from urlparse import urljoin       # Python 2
# Regular expressions for matching e-mail addresses and href links
email_re = re.compile(r'([\w.-]+@[\w.-]+\.\w+)')
link_re = re.compile(r'href="(.*?)"')
def crawl(url):
    result = set()
    req = requests.get(url)

    # Check if the request was successful
    if req.status_code != 200:
        return []

    # Find all links on the page
    links = link_re.findall(req.text)
    print("\nFound {} links".format(len(links)))

    # Search each linked page for e-mail addresses
    for link in links:
        # Resolve the link to an absolute URL
        link = urljoin(url, link)

        # Fetch the linked page and collect any e-mails on it
        req = requests.get(link)
        result.update(email_re.findall(req.text))

    return result
if __name__ == '__main__':
    emails = crawl('http://www.realpython.com')

    print("\nScraped e-mail addresses:")
    for email in emails:
        print(email)
    print("\n")