6 files changed: +113 -13 lines

New file:
+ {
+     "python.pythonPath": "/usr/bin/python"
+ }
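The setting above only pins the interpreter used by the editor; it says nothing about which interpreter runs the scripts from a shell. A quick way to confirm the active interpreter and version (relevant to the Python 2/3 branching in the scripts below) is the small check here, which is a sketch and not part of this PR.

# Sketch (not part of this PR): print the interpreter path and version
# that actually execute this script.
import sys

print(sys.executable)    # e.g. /usr/bin/python or /usr/bin/python3
print(sys.version_info)  # the same check the modified scraper uses to branch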
New file:
+ #!/usr/bin/env python
+
+ ## Instagram Post Image Downloader
+ ## Install Python pip Modules
+
+ # Python2 (pip)
+ ## pip install wget
+ ## pip install beautifulsoup4
+ ## pip install lxml
+
+ # Python3 (pip)
+ ## pip3 install wget
+ ## pip3 install beautifulsoup4
+ ## pip3 install lxml
+
+ from bs4 import BeautifulSoup
+ import wget
+
+ try:  # python3
+     from urllib.request import urlopen
+ except ImportError:  # python2
+     from urllib2 import urlopen
+     input = raw_input
+
+ ## User input
+ url = input("\033[1;32mEnter an Instagram Post URL : \033[1;m")
+
+ insta_post = urlopen(url)
+ bs = BeautifulSoup(insta_post, "lxml")
+
+ ## Find Insta Post Image
+ metatag = bs.find("meta", {"property": "og:image"})
+
+ if metatag is not None:
+
+     print(metatag["content"])
+     print("\n")
+
+     print("Image Started Downloading.......")
+
+     ## Download Image via Wget
+     filename = wget.download(metatag["content"])
+     print("\n")
+
+     print("Done")
+     print("\n")
+
+ else:
+     print("Error")
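For comparison, the same og:image lookup can be written against the requests library instead of urllib + wget. This is a minimal Python 3 sketch, not part of this PR; it assumes the post page is publicly reachable and that requests, beautifulsoup4, and lxml are installed.

# Sketch (not part of this PR): fetch a post page and save the og:image
# target using requests instead of urllib + wget.
import requests
from bs4 import BeautifulSoup

def download_og_image(post_url, out_file="post.jpg"):
    page = requests.get(post_url, timeout=10)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, "lxml")
    meta = soup.find("meta", {"property": "og:image"})
    if meta is None:
        raise ValueError("No og:image tag found on the page")
    image = requests.get(meta["content"], timeout=10)
    image.raise_for_status()
    with open(out_file, "wb") as fh:
        fh.write(image.content)
    return out_file

# Example usage (placeholder URL):
# download_og_image("https://www.instagram.com/p/<post-id>/")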
File renamed without changes.
File renamed without changes.
New file:
+ #!/usr/bin/env python
+
+ from BeautifulSoup import BeautifulSoup
+ #import urllib2
+ import cfscrape
+ import re
+
+ #html_page = urllib2.urlopen("https://example.com")
+
+ # Get the text at the set URL
+ scraper = cfscrape.create_scraper()
+
+ url = "https://example.com"
+ cfurl = scraper.get(url).content
+ soup = BeautifulSoup(cfurl)
+ for link in soup.findAll('a', attrs={'href': re.compile("^(http|https)://")}):
+
+     ## Print Output
+     print link.get('href')
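Note that this copy still targets BeautifulSoup 3 (from BeautifulSoup import BeautifulSoup) and Python 2's print statement, while the changed file below moves to bs4. If both package generations ever need to coexist, a guarded import is one option; the snippet below is an illustration, not code from this PR.

# Sketch (not part of this PR): prefer BeautifulSoup 4, fall back to the
# legacy BeautifulSoup 3 package if bs4 is not installed.
try:
    from bs4 import BeautifulSoup            # BeautifulSoup 4
except ImportError:
    from BeautifulSoup import BeautifulSoup  # legacy BeautifulSoup 3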
Changed file:
  #!/usr/bin/env python

- from BeautifulSoup import BeautifulSoup
- #import urllib2
- import cfscrape
- import re
-
- #html_page = urllib2.urlopen("https://example.com")
-
- # Get the text at the set URL
- scraper = cfscrape.create_scraper()
+ ## Install Python pip Modules
+
+ # Python2 (pip)
+ ## pip install cfscrape
+ ## pip install beautifulsoup4
+ ## pip install lxml
+
+ # Python3 (pip)
+ ## pip3 install cfscrape
+ ## pip3 install beautifulsoup4
+ ## pip3 install lxml
+
+ #from bs4 import BeautifulSoup
+ #import cfscrape
+ #import re
+
+ import sys

- url = "https://example.com"
+ VER = 2
+
+ try:
+     if sys.version_info >= (3, 0):
+         VER = 3
+         from bs4 import BeautifulSoup
+         import cfscrape
+         import re
+     else:
+         input = raw_input
+         from bs4 import BeautifulSoup
+         import cfscrape
+         import re
+ except ImportError:
+     pass
+
+
+ ## User input
+ url = input("\033[1;32mEnter a URL : \033[1;m")
+
+ scraper = cfscrape.create_scraper()
  cfurl = scraper.get(url).content
- soup = BeautifulSoup(cfurl)
+ soup = BeautifulSoup(cfurl, "lxml")
  for link in soup.findAll('a', attrs={'href': re.compile("^(http|https)://")}):

-     ## Print Output
-     print link.get('href')
+     urls = link.get("href")
+     print(urls)
+
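For context, cfscrape.create_scraper() returns a requests-compatible session that solves Cloudflare's JavaScript challenge before handing back the page, so the flow added above can also be packaged as a small helper. The snippet below is an illustrative Python 3 sketch, not code from this PR, and assumes cfscrape, beautifulsoup4, and lxml are installed.

# Sketch (not part of this PR): reusable link extraction built on the
# same cfscrape + BeautifulSoup flow as the changed file above.
import re
import cfscrape
from bs4 import BeautifulSoup

def extract_links(url):
    # The scraper behaves like a requests.Session, but passes Cloudflare's
    # anti-bot check before returning the real HTML.
    scraper = cfscrape.create_scraper()
    html = scraper.get(url).content
    soup = BeautifulSoup(html, "lxml")
    pattern = re.compile(r"^(http|https)://")
    return [a.get("href") for a in soup.find_all("a", attrs={"href": pattern})]

# Example usage (placeholder URL):
# for href in extract_links("https://example.com"):
#     print(href)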