#!/usr/bin/env python3

import argparse
import base64
import os
import re
import sys
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup

URLDUP = re.compile(r'^<!-- URL: (.*) -->$')
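# The marker above matches the first line that write_file() puts into every saved
# page, e.g. (hypothetical URL):
#     <!-- URL: https://example.com/some/page -->
# is_downloaded() uses it to detect pages that are already on disk.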


def get_text(url):
    """Download a URL and return the response body as text."""
    response = requests.get(url)
    response.raise_for_status()
    return response.text


def get_embedded_binary(url):
    """Download a resource and return it as a 'data:' URI string for inlining."""
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        # Leave the attribute empty when the resource cannot be fetched.
        return ''

    ctype = response.headers.get('Content-Type')
    data = response.content
    b64pict = base64.b64encode(data).decode()
    return 'data:%s;base64,%s' % (ctype, b64pict)
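# A successfully embedded resource ends up inline in the page, e.g. for a PNG
# (illustrative, truncated payload):
#     <img src="data:image/png;base64,iVBORw0KGgo...">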


def is_downloaded(url: str) -> bool:
    """Check if url was already downloaded"""
    for htmlfile in os.listdir(path='.'):
        if not htmlfile.endswith('.html'):
            continue

        # Only the first line is read: it carries the URL marker written by write_file().
        with open(htmlfile) as h:
            h_url = h.readline()
            if url in URLDUP.findall(h_url):
                print("URL is already saved in file '%s'" % htmlfile)
                return True

    return False
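# Note: only *.html files in the current working directory are checked, so the
# duplicate detection assumes the script is run from the folder where pages are saved.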


def write_file(page, title, comment=None):
    """Save HTML to file on a disk"""
    write_inc = lambda i: '_%d' % i if i > 1 else ''
    inc = 0
    while True:
        inc += 1
        # Build the filename from the page title; truncate the title before appending
        # the counter so a very long title cannot cut the collision suffix off.
        fname = ' '.join(title.replace('/', '_').split())[:128] + write_inc(inc) + '.html'
        if not os.path.exists(fname):
            break

    with open(fname, 'x', newline='\n') as a_file:
        print('Saving in file "%s"' % fname)
        if comment:
            a_file.write('<!-- URL: %s -->\n' % comment)
        a_file.write(page)
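# E.g. a page titled "Foo / Bar" is saved as "Foo _ Bar.html"; a second page with
# the same title becomes "Foo _ Bar_2.html", and so on.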


def complete_url(url, base_url=None):
    """Create absolute URL from relative paths"""
    if base_url is not None:
        base_up = urlparse(base_url)
        up = urlparse(url)
        if not up.netloc:
            # Host-relative path such as "/img/logo.png": prepend scheme and host.
            url = base_up.scheme + '://' + base_up.netloc + url
        elif not up.scheme:
            # Protocol-relative URL such as "//cdn.example.com/x.js": prepend the scheme.
            url = base_up.scheme + ':' + url
    return url
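# For reference, the two rewrites above behave like this (hypothetical URLs):
#     complete_url('/img/logo.png', base_url='https://example.com/post')
#         -> 'https://example.com/img/logo.png'
#     complete_url('//cdn.example.com/site.css', base_url='https://example.com/post')
#         -> 'https://cdn.example.com/site.css'
# Document-relative paths such as 'img/logo.png' are not resolved correctly by this
# simple concatenation; urllib.parse.urljoin(base_url, url) would cover that case too
# if it turns out to be needed.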


def process_url(url: str, dup_check: bool = False):
    """Save single URL to a file"""
    url = url.strip()
    print('Processing URL: %s' % url)

    if dup_check and is_downloaded(url):
        return

    page_content = get_text(url)
    soup = BeautifulSoup(page_content, 'html.parser')

    # Inline every image as a base64 data URI.
    for img_tag in soup.find_all('img'):
        if not img_tag.get('src'):
            continue
        img_url = complete_url(img_tag['src'], base_url=url)
        print('New picture: %s' % img_url)
        img_b64 = get_embedded_binary(img_url)
        img_tag['src'] = img_b64

    # Replace linked stylesheets with inline <style> blocks.
    for link_tag in soup.find_all('link'):
        if not link_tag.get('href'):
            continue
        link_url = complete_url(link_tag['href'], base_url=url)
        if link_tag.get('rel') and 'stylesheet' in link_tag['rel']:
            print('New CSS: %s' % link_url)
            css_tag = soup.new_tag('style', media='screen', type='text/css')
            css_tag.string = get_text(link_url)
            link_tag.replace_with(css_tag)

    # Inline external scripts as data URIs; scripts without src are already inline.
    for script_tag in soup.find_all('script'):
        if script_tag.get('src') is None:
            continue
        script_url = complete_url(script_tag['src'], base_url=url)
        print('New script: %s' % script_url)
        script_b64 = get_embedded_binary(script_url)
        script_tag['src'] = script_b64

    # Fall back to the URL as the filename base when the page has no <title>.
    title = soup.title.text if soup.title else url
    write_file(str(soup), title, comment=url)


def main():
    parser = argparse.ArgumentParser(
        prog="nevernote.py",
        description="Nevernote - tool for downloading pages locally."
    )
    parser.add_argument("-i", "--infile",
                        help="File with URLs to download")
    # Passing -s turns the duplicate check off, i.e. already saved pages are re-downloaded.
    parser.add_argument("-s", "--skip-dups", action="store_false",
                        default=True, dest="dup_check",
                        help="Rewrite already downloaded files")
    parser.add_argument('urls', metavar='URL', type=str, nargs='*',
                        default=sys.stdin,
                        help='URL of page to download')
    args = parser.parse_args()

    # Process URLs from the file
    if args.infile:
        try:
            fd = open(args.infile, 'r')
        except OSError as err:
            print(err)
            return 1

        for url in fd.readlines():
            process_url(url, dup_check=args.dup_check)
        fd.close()

    # Process URLs from the CLI; without positional URLs they are read from stdin,
    # unless an input file was already processed above.
    if args.urls is not sys.stdin or not args.infile:
        for arg in args.urls:
            process_url(arg, dup_check=args.dup_check)


if __name__ == '__main__':
    sys.exit(main())
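# Usage sketches (hypothetical paths and URLs):
#     ./nevernote.py https://example.com/article
#     ./nevernote.py -i urls.txt
#     cat urls.txt | ./nevernote.py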