#!/usr/bin/python3
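'''Nevernote - download pages locally.

Each URL is fetched, its images, stylesheets and scripts are inlined
into the HTML, and the result is written to a standalone .html file in
the current directory with the source URL recorded on the first line.

Usage (illustrative):
    ./nevernote.py http://example.com/article
    ./nevernote.py urls.txt    # an existing file is read as one URL per line
'''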
import argparse
import base64
import html.parser
import os
import re
import sys
from urllib.parse import urljoin
import requests


class UrlDuplicateError(Exception): pass

# First-line URL marker written by write_file().
URLDUP = re.compile(r'^<!-- URL: (.*) -->$')


class TitleParser(html.parser.HTMLParser):
    '''Collect the page <title> plus image, stylesheet and script URLs.'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.title = 'untitled'  # default in case the page has no <title>
        self.images = set()
self.css = set()
self.scripts = set()
def handle_starttag(self, name, attribs):
if name == 'img':
for attr, value in attribs:
if attr == 'src':
self.images.add(value)
elif name == 'script':
for attr, value in attribs:
if attr == 'src':
self.scripts.add(value)
elif name == 'title':
            # Slice the raw title text out of the parser's input buffer;
            # this works because feed() gets the whole page in one call,
            # so rawdata still holds the full document at this point.
            titletag_start = self.rawdata.index('<title')
            title_start = self.rawdata.index('>', titletag_start) + 1
            title_end = self.rawdata.index('</title>', title_start)
            self.title = self.rawdata[title_start:title_end]
elif name == 'link':
attr_dict = dict(attribs)
if attr_dict.get('rel') == 'stylesheet':
self.css.add(attr_dict['href'])


def get_text(url):
    '''Fetch url and return the response body as text.'''
response = requests.get(url)
response.raise_for_status()
return response.text


def embedded_image(url):
    '''Download the target of url and return it as a base64 data: URI.'''
response = requests.get(url)
response.raise_for_status()
    # Fall back to a generic MIME type when the server omits Content-Type.
    ctype = response.headers.get('Content-Type', 'application/octet-stream')
data = response.content
b64pict = base64.b64encode(data).decode()
return 'data:%s;base64,%s' % (ctype, b64pict)
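
# The returned value is a standard data: URI, e.g. (illustrative):
#   embedded_image('http://example.com/a.png')
#     -> 'data:image/png;base64,iVBORw0KGgo...'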


def embed_pictures(page, pict_urls, base_url=None):
for url in pict_urls:
print('New picture: %s' % url)
try:
page = page.replace(
url, embedded_image(complete_url(url, base_url)))
except requests.exceptions.HTTPError:
            # A picture that fails to download keeps its original URL.
            pass
return page


def embed_css(page, css_urls, base_url=None):
for url in css_urls:
if not url:
continue
print('New CSS: %s' % url)
        # Locate the tag that references this stylesheet and replace the
        # whole tag with an inline <style> element.
        css_start = page.rindex('<', 0, page.index(url))
        css_end = page.index('>', css_start) + 1
        try:
            css_text = get_text(complete_url(url, base_url))
        except requests.exceptions.HTTPError:
            # Stylesheets that fail to download are left referenced as-is.
            continue
        css_tag = '<style media="screen" type="text/css">%s</style>' % css_text
        page = page[:css_start] + css_tag + page[css_end:]
return page


def embed_scripts(page, script_urls, base_url=None):
for url in script_urls:
print('New script: %s' % url)
try:
            # embedded_image() just builds a base64 data: URI, so it works
            # for scripts as well as pictures.
            page = page.replace(
                url, embedded_image(complete_url(url, base_url)))
except requests.exceptions.HTTPError:
pass
return page


def url_duplicate(url):
    '''Raise UrlDuplicateError if url is already saved in this directory.'''
for htmlfile in os.listdir():
if not htmlfile.endswith('.html'):
continue
with open(htmlfile) as h:
h_url = h.readline()
if url in URLDUP.findall(h_url):
raise UrlDuplicateError(
'URL is already saved in file "%s"' % htmlfile)
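
# url_duplicate() relies on the marker write_file() puts on the first
# line of every saved page, e.g. (illustrative URL):
#   <!-- URL: http://example.com/some/page -->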


def write_file(page, title, comment=None):
    '''Save page under a filename derived from its title, avoiding clashes.'''
    inc = 0
    while True:
        inc += 1
        suffix = '_%d' % inc if inc > 1 else ''
        # Truncate the title before appending the counter so the counter
        # cannot be cut off (a very long title would otherwise yield the
        # same name on every pass and loop forever).
        fname = ' '.join(title.replace('/', '_').split())[:128] + suffix + '.html'
if not os.path.exists(fname):
break
with open(fname, 'x', newline='\n') as a_file:
print('Saving in file "%s"' % fname)
if comment:
a_file.write('<!-- URL: %s -->\n' % comment)
a_file.write(page)
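
# For instance, a page titled 'Foo / Bar' is saved as 'Foo _ Bar.html';
# a second page with the same title goes to 'Foo _ Bar_2.html'.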


def complete_url(url, base_url):
    '''Resolve a possibly relative url against the page it was found on.'''
    if base_url is not None:
        # urljoin() handles absolute, root-relative, protocol-relative
        # and path-relative references alike.
        return urljoin(base_url, url)
    return url
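
# e.g. (illustrative URLs):
#   complete_url('/img/a.png', 'http://example.com/page')
#     -> 'http://example.com/img/a.png'
#   complete_url('//cdn.example.com/a.js', 'https://example.com/')
#     -> 'https://cdn.example.com/a.js'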


def process_url(url):
    '''Download url and save it as a self-contained local HTML file.'''
print('Processing URL: %s' % url)
try:
url_duplicate(url)
except UrlDuplicateError as e:
print(e)
return
try:
page = get_text(url)
parser = TitleParser()
parser.feed(page)
page = embed_pictures(page, parser.images, base_url=url)
page = embed_css(page, parser.css, base_url=url)
page = embed_scripts(page, parser.scripts, base_url=url)
except requests.exceptions.HTTPError as e:
print(e)
return False
write_file(page, parser.title, comment=url)


def main():
parser = argparse.ArgumentParser(
description='Nevernote - download pages locally.')
parser.add_argument('urls', metavar='URL', type=str, nargs='+',
help='URL of page to download')
args = parser.parse_args()
for arg in args.urls:
if os.path.isfile(arg):
print('Found file %s' % arg)
            # An existing file is read as a list of URLs, one per line;
            # the with-block ensures the handle is closed afterwards.
            with open(arg) as url_file:
                for url in (line.strip() for line in url_file):
                    process_url(url)
else:
process_url(arg)


if __name__ == '__main__':
sys.exit(main())