Python源码示例:colorama.Style.BRIGHT
示例1
def print_subreddits(self, parser, reddit, search_for):
    """Check which requested Subreddits exist, report both groups, and
    return the valid ones. Exits via quit() when none are valid."""
    print("\nChecking if Subreddit(s) exist...")
    found, missing = self._find_subs(parser, reddit, search_for)
    if found:
        print("\nThe following Subreddits were found and will be scraped:")
        print("-" * 56)
        print(*found, sep="\n")
    if missing:
        print("\nThe following Subreddits were not found and will be skipped:")
        print("-" * 60)
        print(*missing, sep="\n")
    if not found:
        # Nothing left to scrape — abort the whole run.
        print(Fore.RED + Style.BRIGHT + "\nNo Subreddits to scrape!")
        print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
        quit()
    return found
示例2
def get_subreddits(self, parser, reddit):
    """Prompt for one or more Subreddit names (space-separated), looping
    until non-empty input is given, then delegate validation to
    PrintSubs.print_subreddits()."""
    prompt = Style.BRIGHT + """
Enter Subreddit or a list of Subreddits (separated by a space) to scrape:
""" + Style.RESET_ALL
    while True:
        entered = str(input(prompt))
        if entered:
            return PrintSubs().print_subreddits(parser, reddit, entered)
        # Blank input: same complaint as the original ValueError path.
        print("No Subreddits were specified! Try again.")
### Update Subreddit settings in master dictionary.
示例3
def get_settings(self, master, subs):
    """Prompt for a category choice per Subreddit and store its settings.

    Category 5 triggers the search flow; anything else records a result
    count for the chosen category. Re-prompts on invalid input.
    """
    category_menu = Style.BRIGHT + """
Select a category to display for r/%s
-------------------
0: Hot
1: New
2: Controversial
3: Top
4: Rising
5: Search
-------------------
""" + Style.RESET_ALL
    for sub in subs:
        while True:
            try:
                cat_i = int(input(category_menu % sub))
            except ValueError:
                print("Not an option! Try again.")
                continue
            try:
                if cat_i == 5:
                    print("\nSelected search")
                    self._get_search(cat_i, master, sub)
                else:
                    print("\nSelected category: %s" % self._categories[cat_i])
                    self._get_n_results(cat_i, master, sub)
                break
            except (IndexError, ValueError):
                # Out-of-range category index — ask again.
                print("Not an option! Try again.")
示例4
def confirm_subreddits(subs, parser):
    """Ask the user to confirm the selected Subreddits.

    Returns the confirmed list on "y", exits via the parser on "n", and
    re-prompts on anything else.

    Improvements: the identity comprehension ``[sub for sub in subs]`` is
    replaced with ``list(subs)`` (same fresh copy, clearer intent), and the
    raise/except ValueError control flow is replaced with a plain branch.
    """
    while True:
        confirm = input("\nConfirm selection? [Y/N] ").strip().lower()
        if confirm == options[0]:
            return list(subs)
        elif confirm in options:
            # "n" — user declined; bail out through the argument parser.
            print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
            parser.exit()
        else:
            print("Not an option! Try again.")
### Scrape again?
示例5
def run(args, parser, reddit):
    """Drive the basic Subreddit scraper: build settings, confirm them,
    scrape, and repeat until the user declines another pass."""
    Titles.Titles.b_title()
    while True:
        master = None
        while master is None:
            candidate = RunBasic._create_settings(parser, reddit)
            if RunBasic._print_confirm(args, candidate) == options[0]:
                master = candidate
            else:
                # Declined confirmation ends the program.
                print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
                parser.exit()
        Subreddit.GetSortWrite().gsw(args, reddit, master)
        if ConfirmInput.another() == options[1]:
            print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
            break
示例6
def list_submissions(reddit, post_list, parser):
    """Validate submission URLs, warn about missing ones, and return those
    that exist. Exits when nothing remains to scrape."""
    print("\nChecking if post(s) exist...")
    found, missing = Validation.Validation.existence(s_t[2], post_list, parser, reddit, s_t)
    if missing:
        print(Fore.YELLOW + Style.BRIGHT +
              "\nThe following posts were not found and will be skipped:")
        print(Fore.YELLOW + Style.BRIGHT + "-" * 55)
        print(*missing, sep="\n")
    if not found:
        print(Fore.RED + Style.BRIGHT + "\nNo submissions to scrape!")
        print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
        quit()
    return found
示例7
def list_redditors(parser, reddit, user_list):
    """Validate Redditor names, warn about missing ones, and return those
    that exist. Exits when nothing remains to scrape."""
    print("\nChecking if Redditor(s) exist...")
    found, missing = Validation.Validation.existence(s_t[1], user_list, parser, reddit, s_t)
    if missing:
        print(Fore.YELLOW + Style.BRIGHT +
              "\nThe following Redditors were not found and will be skipped:")
        print(Fore.YELLOW + Style.BRIGHT + "-" * 59)
        print(*missing, sep="\n")
    if not found:
        print(Fore.RED + Style.BRIGHT + "\nNo Redditors to scrape!")
        print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
        quit()
    return found
示例8
def master_timer(function):
    """Decorator: log URS start/completion, time the wrapped call, and
    abort cleanly on Ctrl-C.

    Fixes over the original: forwards keyword arguments, preserves the
    wrapped function's metadata via functools.wraps, and propagates the
    wrapped function's return value (the original discarded all three).
    """
    import functools

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        logging.info("INITIALIZING URS.")
        logging.info("")
        start = time.time()
        try:
            result = function(*args, **kwargs)
        except KeyboardInterrupt:
            print(Style.BRIGHT + Fore.RED + "\n\nURS ABORTED BY USER.\n")
            logging.warning("")
            logging.warning("URS ABORTED BY USER.\n")
            quit()
        logging.info("URS COMPLETED SCRAPES IN %.2f SECONDS.\n" %
                     (time.time() - start))
        return result
    return wrapper
示例9
def list_subreddits(parser, reddit, s_t, sub_list):
    """Validate Subreddit names, warn about missing ones, and return those
    that exist. Exits when nothing remains to scrape."""
    print("\nChecking if Subreddit(s) exist...")
    found, missing = Validation.Validation().existence(s_t[0], sub_list, parser, reddit, s_t)
    if missing:
        print(Fore.YELLOW + Style.BRIGHT +
              "\nThe following Subreddits were not found and will be skipped:")
        print(Fore.YELLOW + Style.BRIGHT + "-" * 60)
        print(*missing, sep="\n")
    if not found:
        print(Fore.RED + Style.BRIGHT + "\nNo Subreddits to scrape!")
        print(Fore.RED + Style.BRIGHT + "\nExiting.\n")
        quit()
    return found
示例10
def crimeflare(target):
    """Look up *target* in the local Crimeflare dump (data/ipout) and print
    any origin IPs found.

    Fixes over the original: the ``target`` parameter was accepted but
    ignored in favor of the global ``args.target`` — it is now used; rows
    with fewer than three fields no longer raise IndexError.
    """
    print_out(Fore.CYAN + "Scanning crimeflare database...")
    matches = []
    with open("data/ipout", "r") as ins:
        for line in ins:
            fields = line.split(" ")
            # Expected row shape: "<date> <domain> <ip>"; skip malformed rows.
            if len(fields) >= 3 and fields[1] == target:
                matches.append(fields[2])
    if matches:
        for found_ip in matches:
            print_out(Style.BRIGHT + Fore.WHITE + "[FOUND:IP] " + Fore.GREEN + "" + found_ip.strip())
    else:
        print_out("Did not find anything.")
示例11
def update():
    """Refresh the local CloudFlare subnet list and Crimeflare database.

    Skips the CloudFlare download while TOR is active. Improvements: the
    ZIP archive is opened with a context manager so the handle is always
    closed (the original leaked it on extraction errors), and the TOR
    check uses idiomatic truthiness instead of ``== False``.
    """
    print_out(Fore.CYAN + "Just checking for updates, please wait...")
    print_out(Fore.CYAN + "Updating CloudFlare subnet...")
    if not args.tor:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'}
        r = requests.get("https://www.cloudflare.com/ips-v4", headers=headers, cookies={'__cfduid': "d7c6a0ce9257406ea38be0156aa1ea7a21490639772"}, stream=True)
        with open('data/cf-subnet.txt', 'wb') as fd:
            for chunk in r.iter_content(4000):
                fd.write(chunk)
    else:
        print_out(Fore.RED + Style.BRIGHT + "Unable to fetch CloudFlare subnet while TOR is active")
    print_out(Fore.CYAN + "Updating Crimeflare database...")
    r = requests.get("http://crimeflare.net:83/domains/ipout.zip", stream=True)
    with open('data/ipout.zip', 'wb') as fd:
        for chunk in r.iter_content(4000):
            fd.write(chunk)
    # Context manager guarantees the archive handle is closed before removal.
    with zipfile.ZipFile("data/ipout.zip", 'r') as zip_ref:
        zip_ref.extractall("data/")
    os.remove("data/ipout.zip")
# END FUNCTIONS
示例12
def process_search(options):
    """Build a binary-pattern search query from the hex/ascii/wide options,
    run it through BINOBJ, and display stats and results.

    Improvements: the empty-result check uses truthiness instead of
    ``len(...) == 0``, and stale commented-out code was removed.
    """
    search_query = []
    search_query.extend([hex_pattern(val.replace(' ', '')) for val in options.hex])
    search_query.extend([ascii_pattern(val) for lst in options.a for val in lst])
    search_query.extend([wide_pattern(val) for lst in options.w for val in lst])
    result = BINOBJ.search(
        search_query, limit=options.limit, exact=options.exact, test=options.test)
    if 'error' in result:
        print(Style.BRIGHT + Fore.RED + result['error']['message'])
        return
    if 'stats' in result:
        show_stats_new(result['stats'], options.limit)
    if not result['results']:
        return
    show_results(result['results'], pretty_print=options.pretty_print)
示例13
def print_line(message, level=1, category=None, title=None, status=False):
    """Print an indented, colorized status line.

    The color/prefix pair depends on the status resolved by get_status();
    when *title* is given it is printed bold before the message instead.
    """
    decorations = {
        'applied': (Fore.GREEN, '[+] '),
        'touse': (Fore.YELLOW, '[+] '),
        'toremove': (Fore.RED, '[-] '),
    }
    color, pre = decorations.get(get_status(category, status), ('', ''))
    indent = ' ' * 4 * level
    if title:
        print(indent + Style.BRIGHT + title + ': ' + Style.RESET_ALL + message)
    else:
        print(indent + color + Style.BRIGHT + pre + Fore.RESET + message)
示例14
def check_names(infile):
    """Validate the wordlist path, then launch whichever admin-finder scan
    mode is enabled; print an error when the path does not exist."""
    if not os.path.exists(infile):
        # Bad wordlist path: show banner + usage and complain.
        banner()
        opts()
        print(Fore.RED + Style.BRIGHT + "[-] Invalid path to the wordlist. File could not be found.\n" + Style.RESET_ALL)
        return
    if status_method:
        banner()
        checkasciiwolf()
        scan_start()
        statusfindAdmin()
        print(Fore.RED + Style.BRIGHT + "\n[+] Rock bottom;\n" + Style.RESET_ALL)
    elif error_method:
        banner()
        checkasciiwolf()
        scan_start()
        findAdmin()
        print(Fore.RED + Style.BRIGHT + "\n[+] Rock bottom;\n" + Style.RESET_ALL)
# THIS IS THE STATUS CODE METHOD
示例15
def test_description(self):
    """The parser description should combine the project name, blurb,
    version, environment, and Skelebot version."""
    config = sb.objects.config.Config(name="test-project", description="A test project", version="0.1.0")
    parser = sb.systems.parsing.skeleParser.SkeleParser(config, "test")
    expected = Style.BRIGHT + "Test Project" + Style.RESET_ALL + """
A test project
-----------------------------------
Version: 0.1.0
Environment: test
Skelebot Version: 6.6.6
-----------------------------------"""
    self.assertEqual(parser.desc, expected)
示例16
def title():
    """Print the bold white URS main-title ASCII-art banner."""
    # Raw string keeps the art's backslashes from being escape sequences.
    print(Fore.WHITE + Style.BRIGHT + r"""
__ __ _ __ ____
/\ \/\ \/\`'__\/',__\
\ \ \_\ \ \ \//\__, `\
\ \____/\ \_\\/\____/
\/___/ \/_/ \/___/
""")
### Print Subreddit scraper title.
示例17
def r_title():
    """Print the bold white Redditor-scraper ASCII-art banner."""
    # Raw string keeps the art's backslashes from being escape sequences.
    print(Fore.WHITE + Style.BRIGHT + r"""
_ __
/\`'__\
\ \ \/
\ \_\
\/_/
""")
### Print Redditor scraper title.
示例18
def u_title():
    """Print the bold white Subreddit-scraper ASCII-art banner."""
    # Raw string keeps the art's backslashes from being escape sequences.
    print(Fore.WHITE + Style.BRIGHT + r"""
__ __
/\ \/\ \
\ \ \_\ \
\ \____/
\/___/
""")
### Print comments scraper title.
示例19
def c_title():
    """Print the bold white comments-scraper ASCII-art banner."""
    # Raw string keeps the art's backslashes from being escape sequences.
    print(Fore.WHITE + Style.BRIGHT + r"""
___
/'___\
/\ \__/
\ \____\
\/____/
""")
### Print basic scraper title.
示例20
def b_title():
    """Print the bold white basic-scraper ASCII-art banner."""
    # Raw string keeps the art's backslashes from being escape sequences.
    print(Fore.WHITE + Style.BRIGHT + r"""
__
/\ \
\ \ \____
\ \ '__`\
\ \ \L\ \
\ \_,__/
\/___/... Only scrapes Subreddits.
""")
### Print error title.
示例21
def p_title(error):
    """Print the red error banner, interpolating the Prawcore exception
    so the user can see why login/connection failed."""
    print(Fore.RED + Style.BRIGHT + r"""
_____
/\ '__`\
\ \ \L\ \
\ \ ,__/... Please recheck API credentials or your internet connection.
\ \ \/
\ \_\
\/_/
Prawcore exception: %s
""" % error)
### Print rate limit error title.
示例22
def l_title(reset_timestamp):
    """Print the red rate-limit banner, including when the Reddit API
    rate limit resets."""
    print(Fore.RED + Style.BRIGHT + r"""
__
/\ \
\ \ \
\ \ \ __
\ \ \L\ \
\ \____/
\/___/... You have reached your rate limit.
Please try again when your rate limit is reset: %s
""" % reset_timestamp)
示例23
def _get_search(self, cat_i, master, sub):
    """Prompt for a non-empty search term for *sub* and record it in the
    master dictionary; re-prompt while the input is blank."""
    prompt = (Style.BRIGHT + "\nWhat would you like to search for in r/" +
              sub + "? " + Style.RESET_ALL)
    while True:
        term = str(input(prompt)).strip()
        if term:
            self._update_master(cat_i, master, term, sub)
            return
        # Blank input: same complaint as the original ValueError path.
        print("Not an option! Try again.")
### Get number of results.
示例24
def validate_user(parser, reddit):
    """Announce a successful login for the authenticated user, then show
    the current API rate-limit status."""
    greeting = "\nYou have successfully logged in as u/%s.\n" % reddit.user.me()
    print(Style.BRIGHT + Fore.GREEN + greeting)
    Validation.print_rate_limit(reddit)
### Check Subreddits.
示例25
def _get_raw(self, all_dict, submission):
    """Collect every comment from *submission* in raw (flat) format."""
    notice = "\nProcessing all comments in raw format from submission '%s'..."
    print(Style.BRIGHT + notice % submission.title)
    SortComments().sort(all_dict, True, submission)
### Get comments in structured format.
示例26
def _get_structured(self, all_dict, limit, submission):
    """Collect comments in structured format and return a dict holding at
    most *limit* of them (insertion order preserved)."""
    count = int(limit)
    plurality = "comment" if count == 1 else "comments"
    print(Style.BRIGHT +
          ("\nProcessing %s %s in structured format from submission '%s'...") %
          (limit, plurality, submission.title))
    SortComments().sort(all_dict, False, submission)
    kept_keys = list(all_dict)[:count]
    return {key: all_dict[key] for key in kept_keys}
### Get comments from posts.
示例27
def _print_confirm(args, title):
    """Report the export file (JSON or CSV) created for *title*'s comments,
    underlined to match the message length."""
    if args.json:
        confirmation = "\nJSON file for '%s' comments created." % title
    else:
        confirmation = "\nCSV file for '%s' comments created." % title
    print(Style.BRIGHT + Fore.GREEN + confirmation)
    # Underline length excludes the leading newline character.
    print(Style.BRIGHT + Fore.GREEN + "-" * (len(confirmation) - 1))
### Get, sort, then write scraped comments to CSV or JSON.
示例28
def sort_access(self):
    """Extract each access-controlled Redditor listing, flagging listings
    PRAW refuses to serve instead of aborting the whole scrape."""
    for cat, obj in zip(self._access_names, self._access):
        try:
            self._extract(cat, obj, self._s_types[3])
        except PrawcoreException as error:
            message = ("\nACCESS TO %s OBJECTS FORBIDDEN: %s. SKIPPING."
                       % (cat.upper(), error))
            print(Style.BRIGHT + Fore.RED + message)
            overview_key = "%s (may be forbidden)" % cat.capitalize()
            self._overview[overview_key].append("FORBIDDEN")
示例29
def _print_confirm(self, args, user):
    """Report the export file (JSON or CSV) created for Redditor *user*,
    underlined to match the message length."""
    if args.json:
        confirmation = "\nJSON file for u/%s created." % user
    else:
        confirmation = "\nCSV file for u/%s created." % user
    print(Style.BRIGHT + Fore.GREEN + confirmation)
    # Underline length excludes the leading newline character.
    print(Style.BRIGHT + Fore.GREEN + "-" * (len(confirmation) - 1))
### Get, sort, then write scraped Redditor information to CSV or JSON.
示例30
def log_cancel(function):
    """Decorator: run the wrapped scrape, logging a cancellation notice and
    exiting when the user interrupts with Ctrl-C.

    Fixes over the original: forwards keyword arguments, preserves the
    wrapped function's metadata via functools.wraps, and returns the
    wrapped function's result (the original discarded all three).
    """
    import functools

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except KeyboardInterrupt:
            print(Fore.RED + Style.BRIGHT + "\n\nCancelling.\n")
            logging.info("")
            logging.info("SUBREDDIT SCRAPING CANCELLED BY USER.\n")
            quit()
    return wrapper