#!/usr/bin/python3
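'''Nevernote - download pages locally.

For every URL given on the command line, fetch the page, inline its
images as base64 data: URIs, and save it as a standalone HTML file
named after the page title.
'''
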
import argparse
import base64
import http.client
import html.parser
import os.path
import sys
from urllib.parse import urlparse
import zlib


class InfiniteRedirects(Exception):
    '''Raised when a chain of redirects gets too deep.'''


class TitleParser(html.parser.HTMLParser):
    '''Collect the page title and the src of every <img> tag.'''

    def __init__(self, *args, **kwargs):
        html.parser.HTMLParser.__init__(self, *args, **kwargs)
        self.title = 'untitled'  # fallback when the page has no <title>
        self.in_title = False
        self.images = set()

    def handle_starttag(self, name, attribs):
        if name == 'img':
            for attr, value in attribs:
                if attr == 'src':
                    self.images.add(value)
        elif name == 'title':
            # The title text itself arrives through handle_data() below;
            # this is more robust than searching self.rawdata by hand.
            self.title = ''
            self.in_title = True

    def handle_data(self, data):
        if self.in_title:
            self.title += data

    def handle_endtag(self, name):
        if name == 'title':
            self.title = self.title.strip()
            self.in_title = False


def download_content(url, depth=0):
    '''Download a URL, following redirects, and return the HTTP response.'''
    if depth > 10:
        raise InfiniteRedirects('too many redirects: %s' % url)

    up = urlparse(url)
    # A bare 'host/path' URL has no scheme, so urlparse() puts the host
    # into .path; prefixing '//' makes it parse as a network location.
    if not up.netloc:
        up = urlparse('//' + url)

    headers = {
        "Host": up.netloc,
        "Accept": "text/html",
        "Connection": "keep-alive",
    }

    if not up.scheme or up.scheme == 'http':
        conn = http.client.HTTPConnection(up.netloc)
    elif up.scheme == 'https':
        conn = http.client.HTTPSConnection(up.netloc)
    else:
        raise NotImplementedError("protocol %s is not implemented" % up.scheme)

    # An empty path (e.g. 'http://example.com') must be requested as '/';
    # the query string, if any, has to be re-attached by hand.
    path = up.path or '/'
    if up.query:
        path += '?' + up.query
    conn.request("GET", path, None, headers)
    response = conn.getresponse()

    # Follow permanent (301) and temporary (302) redirects;
    # `depth` guards against redirect loops.
    if response.status in (http.client.MOVED_PERMANENTLY, http.client.FOUND):
        new_url = response.getheader('Location')
        print('Redirecting to ' + new_url)
        return download_content(new_url, depth + 1)
    return response


def get_page(url):
    '''Download an HTML page and decode it to text using the charset
    declared in the response headers.'''
    response = download_content(url)

    # An HTML page must come back with a text/* content type.
    c_type = response.getheader('Content-Type')
    if not c_type or not c_type.startswith('text'):
        raise ValueError('incorrect Content-Type for HTML page: %s' % c_type)

    # Decompress the body if the server compressed it.
    c_encoding = response.getheader('Content-Encoding')
    if c_encoding:
        if c_encoding == 'gzip':
            # wbits=16+MAX_WBITS tells zlib to expect a gzip header and trailer.
            page_binary = zlib.decompress(response.read(), 16 + zlib.MAX_WBITS)
        else:
            raise NotImplementedError(
                'content encoding %s is not implemented' % c_encoding)
    else:
        page_binary = response.read()

    # Charset comes from the Content-Type header; HTTP/1.1 defaults
    # text/* bodies to ISO-8859-1 when none is declared.
    charset = 'iso-8859-1'
    for param in c_type.split(';')[1:]:
        key, _, value = param.strip().partition('=')
        if key.lower() == 'charset':
            charset = value.strip('"')

    return page_binary.decode(charset)


def embedded_image(url):
    '''Download the target of URL and, if it is an image, return it
    as a base64-encoded data: URI string.'''
    response = download_content(url)
    ctype = response.getheader('Content-Type')
    if not ctype or not ctype.startswith('image'):
        raise ValueError('incorrect Content-Type for image: %s' % ctype)
    b64pict = base64.b64encode(response.read()).decode()
    return 'data:%s;base64,%s' % (ctype, b64pict)
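
# For a PNG, for example, embedded_image() returns a string like
# 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...' (truncated here).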


def embed_pictures(page, pict_urls):
    '''Replace image references in the page with inline data: URIs.

    pict_urls maps each src value as it appears in the page to the
    absolute URL it should be downloaded from.'''
    for src, url in pict_urls.items():
        print('New picture: %s' % url)
        try:
            page = page.replace(src, embedded_image(url))
        except (ValueError, InfiniteRedirects):
            # Not an image, or unreachable: leave the reference as it was.
            pass
    return page


def write_file(page, title, comment=None):
    '''Save the page as "<title>.html", appending a counter on collisions.'''
    fname = title.replace('/', '_') + '.html'
    inc = 1
    while os.path.exists(fname):
        inc += 1
        fname = title.replace('/', '_') + '_%d.html' % inc

    # Mode 'x' still refuses to clobber a file created since the check above.
    with open(fname, 'x', newline='\n') as a_file:
        print('Saving to file "%s"' % fname)
        a_file.write(page)
        if comment:
            a_file.write('<!-- URL: %s -->' % comment)


def main():
    parser = argparse.ArgumentParser(
        description='Nevernote - download pages locally.')
    parser.add_argument('urls', metavar='URL', type=str, nargs='+',
                        help='URL of page to download')
    args = parser.parse_args()

    for url in args.urls:
        page = get_page(url)
        # The 'strict' argument was removed from html.parser in Python 3.5.
        tparser = TitleParser()
        tparser.feed(page)

        # Map each src exactly as it appears in the page to an absolute,
        # fetchable URL; building a new mapping avoids mutating the set
        # while iterating over it. Relative srcs are assumed root-relative.
        images = {}
        for picturl in tparser.images:
            if urlparse(picturl).netloc:
                images[picturl] = picturl
            else:
                images[picturl] = '//' + urlparse(url).netloc + picturl

        full_page = embed_pictures(page, images)
        write_file(full_page, tparser.title, comment=url)


if __name__ == '__main__':
    sys.exit(main())
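
# Usage sketch (the script filename here is an assumption):
#     python3 nevernote.py http://example.com/some/page.html
# The page is saved as '<page title>.html' with its images inlined.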