Copy away, folks, copy away! (And then your computer catches fire...) :-D

redirectlinks.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-

#################################################################################################
#
#	redirectlinks.py
#
#	This script corrects links to a redirect.
#
#	Syntax: redirectlinks.py [options]
#	Options:	-dest:Destpage
#				Gets all redirects which link to Destpage and corrects their references.
#
#				-page:Redirpage
#				Corrects all the references of the specified redirect.
#
#	You must specify either the -page or the -dest option.
#
#################################################################################################

import re, wikipedia

def main():
	args = wikipedia.handleArgs()
	destPage = None
	redirs = None
	for argh in args:
		if argh.startswith("-dest:"):
			destPage = wikipedia.Page(wikipedia.getSite(), argh[6:])
			redirs = destPage.getReferences(redirectsOnly = True)
		if argh.startswith("-page:"):
			redirs = [wikipedia.Page(wikipedia.getSite(), argh[6:])]
			if not redirs[0].isRedirectPage():
				raise wikipedia.IsNotRedirectPage(redirs[0])
			destPage = redirs[0].linkedPages()[0]
	if destPage is None or redirs is None:
		wikipedia.output(u"You must specify the -page or the -dest option.")
		return
	wikipedia.output(destPage.title())
	for r in redirs:
		redirRefPages = r.getReferences()
		for ref in redirRefPages:
			print "Correcting links in page: " + ref.title()
			oldtext = ref.get()
			newtext = oldtext
			linkRegexp = "\[\[" + r.title() + "(\|[^]]+)?\]\]"
			ptrn = re.compile(linkRegexp, re.IGNORECASE)
			linkMatch = re.search(ptrn, newtext)
			
			while linkMatch is not None:
				oldLink = newtext[linkMatch.start():linkMatch.end()]
				afterLink = linkMatch.group(1)
				if afterLink is None:
					afterLink = ""
				newLink = "[[" + destPage.title() + afterLink + "]]"
				choice = wikipedia.inputChoice("Replacing link " + oldLink + " to " + newLink + ".\nDo you want to change the link?", ["Accept", "Change"], ['a', 'c'], 'a')
				if choice in ['C', 'c']:
					linkText = wikipedia.input("Insert link (without square brackets): [[" + destPage.title())
					newLink = "[[" + destPage.title() + linkText + "]]"
				newtext = newtext[:linkMatch.start()] + newLink + newtext[linkMatch.end():]
				linkMatch = ptrn.search(newtext)
				
			wikipedia.showDiff(oldtext, newtext)
			choice = wikipedia.inputChoice(u'Modifico?',  ['Yes', 'No'], ['y', 'N'], 'N')
			if choice in ['Y', 'y']:
				ref.put(newtext, u"Correcting links to redirect: " + r.title())

if __name__ == "__main__":
	try:
		main()
	finally:
		wikipedia.stopme()
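
For reference, a typical invocation looks like one of the lines below (the page names here are placeholders, not real pages; the usual pywikipedia global options from user-config.py apply):

python redirectlinks.py -dest:Roma
python redirectlinks.py -page:Rome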

lonelypages.py

Warning: this script has problems with disambiguation pages. It takes the pages from Speciale:Lonelypages, checks that they really are orphans, and adds the notice to those that don't already have it.

# -*- coding: utf-8 -*-

#
# (C) Pietrodn it.wiki
# (C) Filnik it.wiki
# lonelypages.py - version: 1.5
#

import wikipedia
import re

def main():
    args = wikipedia.handleArgs()
    wikiSite = wikipedia.getSite()
    allpages = wikiSite.lonelypages(number = 10000, repeat = True)
    for i in allpages:
        if i.isRedirectPage() or i.isDisambig():
            continue
        refs = i.getReferences()
        refsList = list(refs)
        if len(refsList) > 0:
            wikipedia.output("The page isn't an orphan...")
            continue
        else:
            try:
                oldtxt = i.get()
            except wikipedia.NoPage:
                wikipedia.output("%s doesn't exist! Skip..." % i.title())
                continue
            except wikipedia.IsRedirectPage:
                wikipedia.output("%s is a redirect! Skip..." % i.title())
                continue
            if '{{o}}' in oldtxt.lower() or '{{o|' in oldtxt.lower():
                wikipedia.output("The page has already the template!")
                continue
            else:
                newtxt = u'{{O||mese={{subst:CURRENTMONTH}} {{subst:CURRENTYEAR}}}}\n' + oldtxt
                print "\t\t>>> %s <<<" % i.title()
                wikipedia.showDiff(oldtxt, newtxt)
                choice = wikipedia.inputChoice(u'Pagina orfana non segnalata! Posso procedere?',  [u'Yes', u'No'], [u'y', u'N'], u'N')
                if choice in [u'Y', u'y']:
                    try:
                        i.put(newtxt, u'Bot: Voce orfana, aggiungo template {{O}}')
                    except wikipedia.EditConflict:
                        wikipedia.output('Edit Conflict! Skip...')

if "__main__" == __name__:
    try:
        main()
    finally:
        wikipedia.stopme()
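
The template check above only catches the literal spellings {{o}} and {{o|; a regex variant (just a sketch, assuming the template is always named O) is more tolerant of spacing:

import re

def has_o_template(text):
    # Matches {{O}}, {{o}}, {{ O |mese=...}} and similar spellings.
    return re.search(r'\{\{\s*[oO]\s*(\||\}\})', text) is not None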

My benvenuto.py

From Utente:Alfiobot/benvenuto.py

######################################################
#
# benvenuto.py
#
# Bot to welcome new users (there are too many of them now to do it by hand...)
#
# It downloads the new-user log (up to a predefined limit) and, for each user
# who doesn't yet have a talk page, creates the following text:
#
# {{benvenuto|nome=<username>}}
#
# Based on the pywikipediabot libraries.


import urllib, re
import wikipedia


wikipedia.handleArgs()

# No. of users to check
limit = 250

# URL of the newuser log
url = "http://%s/w/index.php?title=Speciale:Log&type=newusers&user=&page=&limit=%d" % (wikipedia.getSite().hostname(), limit)

# Search regular expression to find links like this (and the class attribute is optional too)
#<a href="/w/index.php?title=Discussioni_utente:Urizon9&action=edit" class="new" title="Discussioni utente:Urizon9">Discussione</a>

regexp = '</a> \(<a href=\"/w/index.php\?title=Discussioni_utente:(.*?)&amp;action=edit'

# Modify user-agent string
class AppURLopener(urllib.FancyURLopener):
    version = "Alfiobot/1.0"
urllib._urlopener = AppURLopener()

# Modify summary text
wikipedia.setAction("benvenuto")

# Read newuser log
print "Getting newuser log (last %d new users)..." % limit
f = urllib.urlopen(url)
text = f.read()
f.close()

r = re.compile(regexp, re.UNICODE)

# Loop over the newuser log and put welcome messages on empty discussion pages
pos = 0
tutti = False
while True:
    m = r.search(text, pos)
    if m is None:
        break
    pos = m.end()

    username = m.group(1)

    print "User %s needs welcome" % username
    page = u'Discussioni utente:%s' % username

    p = wikipedia.Page(wikipedia.getSite(), page)

    # Additional check: make a get to prevent the rare case where a discussion page
    # is created between the newuser log download and now.
    try:
        p.get()
    except wikipedia.NoPage:
        newtext = u'{{benve|nome={{subst:PAGENAME}}|1=--~~~~}}'
        wikipedia.showDiff('', newtext)
        if not tutti:
            choice = wikipedia.inputChoice(u"Vuoi dare il benvenuto all'utente %s?" % username, ['Yes', 'No', 'All'], ['y', 'N', 'a'], 'N')
            if choice in ['a', 'A']:
                tutti = True
        if tutti or choice in ['y', 'Y']:
            p.put(newtext)
            print u"User OK!"
wikipedia.stopme()
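
To see what the regexp actually captures, here is a quick check against a fragment reconstructed from the sample log line quoted in the comment above (the username Urizon9 comes from that sample):

import re

regexp = '</a> \(<a href=\"/w/index.php\?title=Discussioni_utente:(.*?)&amp;action=edit'
sample = '</a> (<a href="/w/index.php?title=Discussioni_utente:Urizon9&amp;action=edit" class="new" title="Discussioni utente:Urizon9">Discussione</a>'
m = re.search(regexp, sample)
print m.group(1)  # prints: Urizon9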

proxyaperti.py

This script simply makes an edit to a page. It's actually used to hunt for open proxies, by editing while logged out. It does, however, require hacking wikipedia.py. For use by trained personnel only ;-)

# -*- coding: utf-8 -*-

import wikipedia

args = wikipedia.handleArgs()
pagina = wikipedia.Page(wikipedia.getSite(), args[0])
oldtxt = pagina.get()
newtxt = oldtxt + '\nSono un vandalo malvagissimo. Non bloccatemi. --~~~~'
wikipedia.showDiff(oldtxt, newtxt)
choice = wikipedia.inputChoice('Posso procedere?',  ['Yes', 'No'], ['y', 'N'], 'N')
if choice in ['Y', 'y']:
    wikipedia.setAction('Bot anonimo per individuare proxy aperti firmato Pietrodn. Non bloccare please.')
    pagina.put(newtxt)
wikipedia.stopme()

lavorosporco.py

This script updates the maintenance backlog ("lavoro sporco") statistics.

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re, wikipedia
from templatecount import TemplateCountRobot

def main():
	args = wikipedia.handleArgs()
	
	wikiSite = wikipedia.getSite()
	counter = TemplateCountRobot()
	templates = {	'A': 'Aiutare',
					'C': 'Controllare',
					'Categorizzare': 'Da categorizzare',
					'Controlcopy': 'Controlcopy',
					'E': 'Ency',
					'O': 'Orfane',
					'P': 'NPOV',
					'Senzafonti': 'Senza fonti',
					'S': 'Stub',
					'T': 'Tradurre',
					'U': 'Unire',
					'W': 'Wikificare',
	}
	
	pagina = wikipedia.Page(wikiSite, 'Progetto:Coordinamento/Statistiche manutenzioni')
	vecchieStatistiche = pagina.get()
	
	templateCountDict = counter.countTemplates(templates.keys(), None)
	
	for k in templateCountDict:
		sottopagina = wikipedia.Page(wikiSite, 'Progetto:Coordinamento/Statistiche manutenzioni/' + templates[k])
		oldtxt = sottopagina.get()
		newtxt = re.sub("</noinclude>.*", "</noinclude>" + str(templateCountDict[k]), oldtxt)
		wikipedia.showDiff(oldtxt, newtxt)
		choice = wikipedia.inputChoice(u"Modificare?",  ['Yes', 'No'], ['y', 'N'], 'N')
		if choice in ['Y', 'y']:
			wikipedia.setAction(u'Conto lavoro sporco')
			sottopagina.put(newtxt)
			
	findRegexp = "\d+:\d+, \d+ .{3} \d+ \(.*?\)"
	replaceRegexp = "~~~~~"
	nuoveStatistiche = re.sub(findRegexp, replaceRegexp, vecchieStatistiche)
	wikipedia.showDiff(vecchieStatistiche, nuoveStatistiche)
	choice = wikipedia.inputChoice(u"Aggiorno le date?",  ['Yes', 'No'], ['y', 'N'], 'N')
	if choice in ['Y', 'y']:
		wikipedia.setAction(u'Aggiorno le date del conto')
		pagina.put(nuoveStatistiche)
	

if __name__ == "__main__":
	try:
		main()
	finally:
		wikipedia.stopme()
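
As a quick illustration of the date-refresh step: findRegexp is meant to match signature-style timestamps, which re.sub then replaces with ~~~~~ so that MediaWiki writes a fresh timestamp on save. A small check with a made-up sample line:

import re

findRegexp = "\d+:\d+, \d+ .{3} \d+ \(.*?\)"
sample = u"Ultimo aggiornamento: 12:34, 5 mag 2007 (CEST)"  # hypothetical line
print re.sub(findRegexp, u"~~~~~", sample)
# Ultimo aggiornamento: ~~~~~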