blob: d38878097b9e58f703ab2ca352cff59043c3d22f (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
|
#!/usr/bin/env python
import urllib2
import subprocess
# make sure to: sudo apt-get install python-bs4
from bs4 import BeautifulSoup
import os
#urls to start search from
baseurl = ['http://cdimages.ubuntu.com/daily-live/current/', 'http://cdimages.ubuntu.com/trusty/daily-live/current/', 'http://cdimages.ubuntu.com/ubuntu-server/daily/current/']
# For each cdimage directory listing: scrape the .zsync links and run
# zsync on each one inside a local directory mirroring the remote path
# (e.g. ~/iso/daily-live/current/).
for url in baseurl:
    # Grab the HTML directory listing; close the response when done
    # (the original leaked the connection).
    response = urllib2.urlopen(url)
    try:
        content = response.read()
    finally:
        response.close()
    # Parse with an explicit parser -- BeautifulSoup(content) alone is
    # parser-dependent and emits a "no parser specified" warning in bs4.
    soup = BeautifulSoup(content, "html.parser")
    # Collect href attributes of all anchors. a.get('href') may return
    # None for anchors without an href, so filter those out before
    # calling .endswith (the original would crash on such a page).
    all_links = [a.get('href') for a in soup.find_all('a')]
    zsync_links = [link for link in all_links
                   if link and link.endswith('.zsync')]
    # Build absolute URLs from the relative listing entries.
    zsync_urls = ['%s%s' % (url, link) for link in zsync_links]
    if not zsync_urls:
        # Nothing to fetch -- skip so we don't create empty directories
        # (matches the original behavior, which only touched the
        # filesystem inside the per-link loop).
        continue
    # The path below http://cdimages.ubuntu.com/ becomes the local
    # subdirectory under ~/iso.
    subdir = url.split("http://cdimages.ubuntu.com/")[1]
    target = os.path.join(os.path.expanduser("~"), "iso", subdir)
    if not os.path.exists(target):
        os.makedirs(target)
    # zsync writes into the current working directory; the directory
    # setup is loop-invariant, so do it once per listing, not per link.
    os.chdir(target)
    # Renamed from 'url' -- the original shadowed the outer loop
    # variable, which was confusing (though harmless in Python).
    for zsync_url in zsync_urls:
        subprocess.call(["zsync", zsync_url])
|