Python

Assorted Python 2 snippets, cobbled together from here and there.

Generate random numbers that add up to a fixed total:

from random import shuffle, randint

total=100
count=5

nums = []
    
for i in range(count - 1):
    x = randint(15, 25)    # pick a value in the allowed range
    nums.append(x)
    total = total - x      # track what is left of the total

nums.append(total)         # the remainder makes the list sum to the original total (100)

shuffle(nums)

print nums

raw_input("")   # keep the console window open until Enter is pressed
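
The same idea wrapped in a reusable function, as a sketch (the function name and the low/high parameters are mine, not part of the original snippet); note that, as in the original, the final remainder is not forced to stay inside the 15-25 range:

from random import shuffle, randint

def random_parts(total, count, low=15, high=25):
    # Draw count-1 values in [low, high]; the remainder becomes the last
    # element, so the whole list always sums to total.
    nums = []
    for i in range(count - 1):
        x = randint(low, high)
        nums.append(x)
        total -= x
    nums.append(total)
    shuffle(nums)
    return nums

print(random_parts(100, 5))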

Scrape Google Finance for the major indexes (Dow Jones, S&P 500, Nasdaq):

from urllib import urlopen
from bs4 import BeautifulSoup
from datetime import datetime

start = 'https://www.google.com/finance'

page = urlopen(start)

soup = BeautifulSoup(page, "html.parser")

dji_price = soup.find('span',{'id':'ref_983582_l'})
dji_change = soup.find('span',{'id':'ref_983582_c'})
dji_pct = soup.find('span',{'id':'ref_983582_cp'})

sp500_price = soup.find('span',{'id':'ref_626307_l'})
sp500_change = soup.find('span',{'id':'ref_626307_c'})
sp500_pct = soup.find('span',{'id':'ref_626307_cp'})

nasdaq_price = soup.find('span',{'id':'ref_13756934_l'})
nasdaq_change = soup.find('span',{'id':'ref_13756934_c'})
nasdaq_pct = soup.find('span',{'id':'ref_13756934_cp'})

print ""
print "------------------------------"
print "UPDATED: " + datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print "------------------------------"
print ""

print("Dow Jones: " + dji_price.get_text() + " " + dji_change.get_text() + " " + dji_pct.get_text())

print ""

print("S&P 500: " + sp500_price.get_text() + " " + sp500_change.get_text()+ " " + sp500_pct.get_text())

print ""

print("Nasdaq: " + nasdaq_price.get_text() + " " + nasdaq_change.get_text()+ " " + nasdaq_pct.get_text())

raw_input("")
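
The three index lookups above differ only in the numeric ref id baked into the span ids, so they can be folded into a small helper. A sketch (index_quote is a made-up name, and the ids are only valid for the Google Finance markup this script was written against):

def index_quote(soup, ref_id):
    # Fetch the price, change and percent-change spans for one index.
    price = soup.find('span', {'id': 'ref_%s_l' % ref_id})
    change = soup.find('span', {'id': 'ref_%s_c' % ref_id})
    pct = soup.find('span', {'id': 'ref_%s_cp' % ref_id})
    return price.get_text(), change.get_text(), pct.get_text()

for name, ref_id in [('Dow Jones', '983582'),
                     ('S&P 500', '626307'),
                     ('Nasdaq', '13756934')]:
    print("%s: %s %s %s" % ((name,) + index_quote(soup, ref_id)))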

Download an episode of Cuéntame cómo pasó from Spanish TV (RTVE):

import urllib
import urllib2
import os
import time
from bs4 import BeautifulSoup
from urllib import urlopen
from urllib import urlretrieve
from progressbar import ProgressBar, Percentage, Bar

#Go to http://www.rtve.es/alacarta/videos/cuentame-como-paso/ and get episode URL

print "Getting URL from RTVE..."
print ""

#initial_url = raw_input("Copy the link from RTVE.es: ")

cuentame_home = 'http://www.rtve.es/alacarta/videos/cuentame-como-paso/'
page = urlopen(cuentame_home)
soup = BeautifulSoup(page, "html.parser")

all_urls = []
for link in soup.find_all('a'):
    better_url = link.get('href')
    if better_url:                  # some anchors have no href; skip them
        all_urls.append(better_url)

filtered_urls = filter(lambda x: '/alacarta/videos/cuentame-como-paso/cuentame-como-paso-' in x, all_urls)
initial_url = filtered_urls[0]

#Go to http://piraminetlab.com and pass RTVE link into form
#
#http://www.blog.pythonlibrary.org/2012/06/08/python-101-how-to-submit-a-web-form/

print "Getting download link from Piraminetlab..."
print ""

data = urllib.urlencode({'url_original': initial_url})
url = 'http://www.piraminetlab.com/enlaces.php'
full_url = url + '?' + data
response = urllib2.urlopen(full_url)

#Get .mp4 download link
#
#https://stackoverflow.com/questions/4462061/beautiful-soup-to-parse-url-to-get-another-urls-data
soup = BeautifulSoup(response, "html.parser")

# Keep the href of the last anchor with class 'enlace' (the .mp4 download link).
for anchor in soup.findAll('a', {'class': 'enlace'}):
    new_url = anchor['href']
#print new_url

#Download episode and save to the desktop

print "Downloading episode and saving to Desktop..."
print ""

mp4 = urllib.URLopener()
mp4.retrieve(new_url, "PATHHERE")   # replace PATHHERE with the target file path (e.g. on the Desktop)

wait = raw_input("")
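
The progressbar import at the top of this script is never actually used. One way to hook it up is to download with urlretrieve (already imported) and a reporthook. A sketch, assuming the classic python-progressbar API and that the server reports a Content-Length; PATHHERE is still the placeholder for the output path:

pbar = None

def show_progress(block_num, block_size, total_size):
    # urlretrieve calls this after every block; use it to drive the bar.
    global pbar
    if pbar is None:
        pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=total_size).start()
    downloaded = block_num * block_size
    pbar.update(min(downloaded, total_size))
    if downloaded >= total_size:
        pbar.finish()

urlretrieve(new_url, "PATHHERE", reporthook=show_progress)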

Rename scanned images (Scan*.JPG) to sequentially numbered .jpg files:

import glob
import re
import os
 
files = glob.glob('*.JPG')  # get *.JPG in a list (not sorted!)
files.sort()                # sort the list _in place_
cnt = 11                    # start new names with 11.jpg
 
for f in files:
    original = f                                    # save the original file name
    result = re.search(r'Scan(\d+)\.JPG', f)        # pattern to match
    if result:                                      # Is there a match?
        new_name = str(cnt) + '.jpg'                # create the new name
        print "%s => %s" % (original, new_name)     # verify if it's OK
        # os.rename(original, new_name)             # then uncomment to rename
        cnt += 1                                    # increment the counter
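
One caveat: files.sort() above orders the names as plain strings, so Scan10.JPG would sort before Scan2.JPG. If numeric order matters, the same Scan pattern can drive the sort; a sketch, assuming every file in the list matches it:

files.sort(key=lambda f: int(re.search(r'Scan(\d+)\.JPG', f).group(1)))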

Split a large text file into several smaller ones (one per message):

import re

#Read the whole input file.
txt = open('TXT_FILE_PATH_HERE').read()

#Split on the delimiter; here each message ends with -- End --.
emailList = re.split(r'-- End --', txt)

#Write each piece to its own numbered file.
for i, email in enumerate(emailList):
    with open('TARGET_DIR_HERE' + str(i) + '.txt', 'w') as out:
        out.write(email)
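
re.split leaves whatever follows the last -- End -- as a trailing piece, usually just whitespace, which ends up as an extra, nearly empty file. A sketch of the same loop that drops blank pieces first:

#Drop pieces that are only whitespace before writing.
chunks = [c for c in re.split(r'-- End --', txt) if c.strip()]

for i, chunk in enumerate(chunks):
    with open('TARGET_DIR_HERE' + str(i) + '.txt', 'w') as out:
        out.write(chunk)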