giveaway_bot.py
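
"""Entry point for the giveaway bot.

Collects giveaway urls from https://reddit.com/r/giveaways/, http://gleamlist.com
and, when enabled in config.json, https://playr.gg/giveaways, then visits and
completes each giveaway in the browser. Completed giveaways are logged to
data/history.csv and failures to data/errors.csv. Run login.py first so the
required cookie files exist.
"""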
import json
import os

from src import browser, giveaway, logger, reddit, scraper, twitter, utils


def main():
    giveaway.load_json()
    history_ids = logger.read_log("data/history.csv")
    error_ids = logger.read_log("data/errors.csv")

    with open('config.json') as json_data_file:
        config = json.load(json_data_file)
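
    # Illustrative config.json shape, inferred from the keys this script reads
    # ('do_playrgg_giveaways' and 'twitter_auth' with 'consumer_key'); the real
    # config file may contain additional fields:
    # {
    #     "do_playrgg_giveaways": true,
    #     "twitter_auth": {
    #         "consumer_key": ""
    #     }
    # }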

    if not os.path.isfile("data/cookies.pkl"):
        print("Did not find an authentication cookies file, please run login.py first")
        exit(0)
    if config['do_playrgg_giveaways'] and not os.path.isfile("data/cookies_playrgg.pkl"):
        print("If you want to complete playrgg giveaways, first run login.py and complete the steps.")
        exit(0)

    # Get all the giveaway urls
    utils.start_loading_text("Getting urls from https://reddit.com/r/giveaways/")
    urls_reddit = reddit.get_urls()
    urls = urls_reddit['gleam'].copy()
    if config['do_playrgg_giveaways']:
        urls.extend(urls_reddit['playrgg'])
    utils.stop_loading_text(f"Got {len(urls)} urls from https://reddit.com/r/giveaways/")

    utils.start_loading_text("Getting urls from http://gleamlist.com")
    urls_gleamlist = scraper.get_urls_gleamlist()
    utils.stop_loading_text(f"Got {len(urls_gleamlist)} urls from http://gleamlist.com")
    urls.extend(urls_gleamlist)

    if config['do_playrgg_giveaways']:
        utils.start_loading_text("Getting urls from https://playr.gg/giveaways")
        urls_playrgg = scraper.get_urls_playrgg()
        utils.stop_loading_text(f"Got {len(urls_playrgg)} urls from https://playr.gg/giveaways")
        urls.extend(urls_playrgg)
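
    # Build Giveaway objects from the collected urls; urls that cannot be
    # parsed raise ValueError and are simply skipped.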
    giveaways = []
    for url in urls:
        try:
            giveaways.append(giveaway.Giveaway(url))
        except ValueError:
            continue

    # Drop giveaways that were already completed or previously failed
    giveaways = utils.filter_giveaways(giveaways, history_ids, error_ids)
    print(f"\nTotal giveaways after filtering: {len(giveaways)}")
    if config['twitter_auth']['consumer_key'] != "":
        twitter.init(config['twitter_auth'])
    else:
        print("Not using Twitter, no details given in the config")

    browser.init_driver()
    # Load the authentication cookies saved by login.py
    browser.apply_cookies("https://gleam.io/")
    if config['do_playrgg_giveaways']:
        browser.apply_cookies("https://playr.gg/")

    # complete the giveaways
    for g in giveaways:
        print("\n")
        browser.get_url(g.url)
        print(f"Visited {g.url}")
        try:
            g.get_info()
            print(g.name, end='', flush=True)
            g.complete()
            # update the info
            browser.refresh()
            g.get_info(after_giveaway=True)
        except giveaway.CountryError:
            print("\tNot available in your country", end='')
            logger.write_error("data/errors.csv", g)
            continue
        except giveaway.EndedError:
            print("\tGiveaway has ended", end='')
            logger.write_error("data/errors.csv", g)
            continue
        except giveaway.NotStartedError:
            print("\tGiveaway has not started yet", end='')
            continue
        except giveaway.PageNotAvailableError:
            print("\tError getting page information or page does not exist", end='')
            logger.write_error("data/errors.csv", g)
            continue
        except giveaway.NotLoggedInError:
            print("\tNot logged in, please run login.py", end='')
            continue
        except giveaway.CaptchaError:
            print("\tGiveaway requires Human Verification", end='')
            logger.write_error("data/errors.csv", g)
            continue
        except ValueError:
            logger.write_error("data/errors.csv", g)
            continue
        logger.write_log("data/history.csv", g)


if __name__ == '__main__':
    try:
        main()
    finally:
        utils.stop_loading_text()
        browser.close_driver()