|
|
|
@ -38,6 +38,7 @@ def main():
|
|
|
|
|
limit = 500
|
|
|
|
|
wikis = []
|
|
|
|
|
while True:
|
|
|
|
|
#query does not retrieve wiki-farm wikis (e.g. https://wikiapiary.com/wiki/Reiser4_FS_Wiki); fix the Ask query?
|
|
|
|
|
url = 'https://wikiapiary.com/wiki/Special:Ask/-5B-5BCategory:Website-20not-20archived-5D-5D-20-5B-5BIs-20defunct::False-5D-5D-20-5B-5BIs-20in-20farm::False-5D-5D/-3F%%3DWiki-23/-3FHas-20API-20URL%%3DAPI/-3FHas-20pages-20count%%3DPages/-3FHas-20images-20count%%3DImages/format%%3Dtable/limit%%3D%d/link%%3Dall/sort%%3DHas-20pages-20count,Has-20images-20count/order%%3Dasc/mainlabel%%3DWiki/searchlabel%%3D%%E2%%80%%A6-20further-20results/offset%%3D%d' % (limit, offset)
|
|
|
|
|
f = urllib.urlopen(url)
|
|
|
|
|
raw = f.read()
|
|
|
|
@ -45,7 +46,7 @@ def main():
|
|
|
|
|
for i in m:
|
|
|
|
|
domain = getdomain(i[1])
|
|
|
|
|
if domain not in donewikis and not domain.endswith('editthis.info') and not domain.endswith('wiki-site.com'):
|
|
|
|
|
print i[0], i[1], i[2], i[3]
|
|
|
|
|
print i[1], i[2], i[3], i[0]
|
|
|
|
|
|
|
|
|
|
if not re.search(ur'rel="nofollow">Next</a>', raw):
|
|
|
|
|
break
|
|
|
|
|