#!/usr/bin/python3
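'''Nevernote - download web pages as single, self-contained HTML files.

Images are inlined as base64 data URIs and linked stylesheets become
<style> blocks, so a saved page can be read without network access.

Usage sketch (the script name is illustrative):
    ./nevernote.py http://example.com/article.html urls.txt
Each argument is either a URL or a file holding one URL per line.
'''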

import argparse
import base64
import html.parser
import os
import re
import sys
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import urlopen


class UrlDuplicateError(Exception): pass


# Matches the '<!-- URL: ... -->' comment written as the first line of every
# saved file; url_duplicate() uses it to spot pages that were already saved.
URLDUP = re.compile(r'^<!-- URL: (.*) -->$')


class TitleParser(html.parser.HTMLParser):
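    '''Collect the page title, image sources and stylesheet links.'''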

    def __init__(self, *args, **kwargs):
        html.parser.HTMLParser.__init__(self, *args, **kwargs)
        self.title = 'untitled'  # fallback for pages without a <title> tag
        self.images = set()
        self.css = set()

    def handle_starttag(self, name, attribs):
        if name == 'img':
            for attr, value in attribs:
                if attr == 'src':
                    self.images.add(value)
        elif name == 'title':
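            # Pull the title straight from the raw input buffer; this
            # assumes the whole page was passed to a single feed() call.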
            titletag_start = self.rawdata.index('<title')
            title_start = self.rawdata.index('>', titletag_start) + 1
            title_end = self.rawdata.index('</title>', title_start)
            self.title = self.rawdata[title_start:title_end]
        elif name == 'link':
            attr_dict = dict(attribs)
            if attr_dict.get('rel') == 'stylesheet':
                self.css.add(attr_dict['href'])


def get_text(url, content='text/html'):
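    '''Download url and return the response body decoded to str.

    Raises RuntimeError unless the response status is 200 and the
    content type starts with `content`.
    '''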
    u = urlopen(url)
    if u.status != 200:
        raise RuntimeError('Incorrect HTTP status for %s' % url)
    ctype = u.headers.get('content-type')
    if ctype is None:
        raise RuntimeError('No content type for %s' % url)
    if not ctype.startswith(content):
        raise RuntimeError('Incorrect content-type for %s: %s' % (url, ctype))

    # Get the charset from the Content-Type header, e.g.
    # 'text/html; charset=UTF-8'; fall back to utf-8 when it is absent.
    charset = ctype.split(';')[1].split('=')[1] if 'charset' in ctype else 'utf-8'
    data = u.read()
    page = data.decode(charset.strip().lower())
    return page


def embedded_image(url):
    '''Download an image from url and return it as a base64 data URI string.'''
    u = urlopen(url)
    if u.getcode() != 200:
        raise RuntimeError('Incorrect status for %s' % url)
    ctype = u.headers.get('Content-Type')
    data = u.read()
    b64pict = base64.b64encode(data).decode()
    return 'data:%s;base64,%s' % (ctype, b64pict)


def embed_pictures(page, pict_urls, base_url=None):
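    '''Replace each image URL in page with an inline base64 data URI.'''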
    for url in pict_urls:
        print('New picture: %s' % url)
        try:
            page = page.replace(
                url, embedded_image(complete_url(url, base_url)))
        except (ValueError, ConnectionRefusedError, URLError):
            # Skip images that cannot be fetched and keep the original URL;
            # URLError also covers HTTPError, so one 404 image does not
            # abort the whole page.
            pass
    return page


def embed_css(page, css_urls, base_url=None):
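    '''Replace each <link rel="stylesheet"> tag with an inline <style> block.'''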
    for url in css_urls:
        if not url:
            continue
        print('New CSS: %s' % url)
        # Find the bounds of the <link ...> tag referencing this URL and
        # splice the downloaded stylesheet in as a <style> element.
        css_start = page.rindex('<', 0, page.index(url))
        css_end = page.index('>', css_start) + 1
        css_tag = ('<style media="screen" type="text/css">%s</style>'
                   % get_text(complete_url(url, base_url), 'text/css'))
        page = page[:css_start] + css_tag + page[css_end:]
    return page


def url_duplicate(url):
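    '''Raise UrlDuplicateError if url was already saved in this directory.

    Every saved file records its source URL on the first line, so one
    readline() per .html file is enough to check.
    '''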
    for htmlfile in os.listdir():
        if not htmlfile.endswith('.html'):
            continue
        with open(htmlfile) as h:
            h_url = h.readline()
        if url in URLDUP.findall(h_url):
            raise UrlDuplicateError(
                'URL is already saved in file "%s"' % htmlfile)


def write_file(page, title, comment=None):
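    '''Save page under a file name derived from title, avoiding collisions.'''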
    write_inc = lambda i: '_%d' % i if i > 1 else ''
    inc = 0
    # Append _2, _3, ... to the name until it no longer collides.
    while True:
        inc += 1
        fname = ' '.join(title.replace('/', '_').split()) + write_inc(inc) + '.html'
        if not os.path.exists(fname):
            break

    with open(fname, 'x', newline='\n') as a_file:
        print('Saving in file "%s"' % fname)
        if comment:
            a_file.write('<!-- URL: %s -->\n' % comment)
        a_file.write(page)


def complete_url(url, base_url):
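    '''Resolve host-relative and scheme-relative URLs against base_url.'''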
    if base_url is not None:
        base_up = urlparse(base_url)
        up = urlparse(url)
        if not up.netloc:
            # Host-relative reference, e.g. '/img/logo.png'.
            url = base_up.scheme + '://' + base_up.netloc + url
        elif not up.scheme:
            # Scheme-relative reference, e.g. '//cdn.example.com/a.css'.
            url = base_up.scheme + ':' + url
    return url


def process_url(url):
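    '''Download one page, inline its images and CSS, and save it to disk.'''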
    print('Processing URL: %s' % url)
    try:
        url_duplicate(url)
    except UrlDuplicateError as e:
        print(e)
        return

    page = get_text(url)
    parser = TitleParser()  # the 'strict' flag was removed in Python 3.5
    parser.feed(page)

    page = embed_pictures(page, parser.images, base_url=url)
    page = embed_css(page, parser.css, base_url=url)
    write_file(page, parser.title, comment=url)


def main():
    parser = argparse.ArgumentParser(
        description='Nevernote - download pages locally.')
    parser.add_argument('urls', metavar='URL', type=str, nargs='+',
                        help='URL of page to download')
    args = parser.parse_args()

    # Each argument is either a file holding one URL per line, or a URL.
    for arg in args.urls:
        if os.path.isfile(arg):
            print('Found file %s' % arg)
            for url in (line.strip() for line in open(arg)):
                process_url(url)
        else:
            process_url(arg)


if __name__ == '__main__':
    sys.exit(main())