Python源码示例:logzero.logger.info()

示例1
def test_preferences(c: wda.Client):
    """Smoke-test basic WDA client calls against the iOS Preferences app."""
    print("Status:", c.status())
    print("Info:", c.info)
    print("BatteryInfo", c.battery_info())
    print("AppCurrent:", c.app_current())
    # page_source = c.source()
    # assert "</XCUIElementTypeApplication>" in page_source

    session = c.session(bundle_id)

    # Tap the "Cellular" entry and report its on-screen bounds.
    cellular = session(label="蜂窝网络").get()
    cellular.click()
    print("Element bounds:", cellular.bounds)

    logger.info("Take screenshot: %s", session.screenshot())

    session.swipe_right()
    session.swipe_up()

    # Scroll the "Battery" entry into view, then open it.
    session(label="电池").scroll()
    session(label="电池").click()
示例2
async def start_userbot():
    """Start the userbot client and block until it is stopped.

    Declared ``async`` because the body awaits ``bot_checker.idle()``;
    the original plain ``def`` containing ``await`` was a SyntaxError.
    """
    log.info("Starting Userbot...")
    bot_checker.start()
    log.info("Userbot running.")

    # Blocks here until the checker is told to shut down.
    await bot_checker.idle()

    if settings.RUN_BOTCHECKER:
        pass  # BotChecker job scheduling is currently disabled.
        # botchecker_context.update(
        #     {'checker': bot_checker, 'stop': threading.Event()})
        # updater.job_queue.run_repeating(
        #     botcheckerworker.ping_bots_job,
        #     context=botchecker_context,
        #     first=1.5,
        #     interval=settings.BOTCHECKER_INTERVAL
        # )
示例3
def add_keywords(bot, response, to_check):
    """Extract known keywords from a bot's /start and /help response text.

    Marks ``to_check`` as a botbuilder clone when the botbuilder pattern
    matches, stores every whole-word keyword hit (minus forbidden ones),
    and notifies the BotList notifications channel about new keywords.
    """
    if not isinstance(response, Response) or response.empty:
        return

    full_text = response.full_text.lower()
    # Search for botbuilder pattern to see if this bot is a Manybot/Chatfuelbot/etc.
    if botbuilder_pattern.search(full_text):
        to_check.botbuilder = True

    # Search /start and /help response for global list of keywords.
    # re.escape guards against keyword names containing regex metacharacters
    # (e.g. "c++"), which previously could raise re.error or mis-match.
    to_add = []
    for name in Keyword.get_distinct_names(exclude_from_bot=to_check):
        if re.search(r'\b{}\b'.format(re.escape(name)), full_text, re.IGNORECASE):
            to_add.append(name)

    to_add = [x for x in to_add if x not in settings.FORBIDDEN_KEYWORDS]

    if to_add:
        Keyword.insert_many([dict(name=k, entity=to_check) for k in to_add]).execute()
        msg = 'New keyword{}: {} for {}.'.format(
            's' if len(to_add) > 1 else '',
            ', '.join(['#' + k for k in to_add]),
            to_check.str_no_md)
        bot.send_message(settings.BOTLIST_NOTIFICATIONS_ID, msg, timeout=40)
        log.info(msg)
示例4
def send_botlist(bot, update, resend=False, silent=False):
    """Publish (or re-publish) the BotList to the channel."""
    log.info("Re-sending BotList..." if resend else "Updating BotList...")

    channel = helpers.get_channel()

    # Each publication bumps the persisted revision counter.
    revision = Revision.get_instance()
    revision.nr += 1
    revision.save()

    categories = Category.select_all()

    sender = BotList(bot, update, channel, resend, silent)
    if resend:
        sender.delete_full_botlist()
    # Emit the list sections in their fixed order.
    sender.update_intro()
    sender.update_categories(categories)
    sender.update_new_bots_list()
    sender.update_category_list()
    sender.send_footer()
    sender.finish()

    channel.save()
    Statistic.of(update, 'send',
                 'botlist (resend: {})'.format(str(resend)),
                 Statistic.IMPORTANT)
示例5
async def post(self, udid=None):
        """Handle a device "colding" (cleanup/reset) request.

        Declared ``async`` because the body awaits ``device.reset()`` and
        ``hbconn.device_update``; the original plain ``def`` containing
        ``await`` was a SyntaxError.
        """
        udid = udid or self.get_argument("udid")
        logger.info("Receive colding request for %s", udid)

        # Reject requests that do not carry the shared secret.
        request_secret = self.get_argument("secret")
        if secret != request_secret:
            logger.warning("secret not match, expect %s, got %s", secret,
                           request_secret)
            return

        # Silently ignore devices this provider does not manage.
        if udid not in udid2device:
            return

        device = udid2device[udid]
        await device.reset()
        # Report the device back to the hub as available again.
        await hbconn.device_update({
            "udid": udid,
            "colding": False,
            "provider": device.addrs(),
        })
        self.write({"success": True, "description": "Device colded"})
示例6
async def _drain_queue(self):
        """
        Forward queued messages to the server and mirror them in the local db.

        Logic:
            - send message to server when server is alive
            - update local db

        Declared ``async`` because the body awaits queue/websocket ops; the
        original plain ``def`` containing ``await`` was a SyntaxError.
        """
        while True:
            message = await self._queue.get()
            if message is None:
                # Wake-up signal: replay everything stored in the local db.
                logger.info("Resent messages: %s", self._db)
                for _, v in self._db.items():
                    await self._ws.write_message(v)
                continue

            if 'udid' in message:  # ping messages do not carry a udid
                udid = message['udid']
                update_recursive(self._db, {udid: message})
            self._queue.task_done()

            if self._ws:
                try:
                    await self._ws.write_message(message)
                    logger.debug("websocket send: %s", message)
                except TypeError as e:
                    logger.info("websocket write_message error: %s", e)
示例7
async def _connect(self):
        """Open the websocket to the server and perform the handshake.

        Declared ``async`` because the body awaits connect/read/write; the
        original plain ``def`` containing ``await`` was a SyntaxError.
        """
        ws = await websocket.websocket_connect(self._server_ws_url)
        ws.__class__ = SafeWebSocket

        await ws.write_message({
            "command": "handshake",
            "name": self._name,
            "owner": self._owner,
            "secret": self._secret,
            "url": self._provider_url,
            "priority": self._priority,  # the larger, the more important
        })

        msg = await ws.read_message()
        logger.info("WS receive: %s", msg)
        return ws
示例8
def runTest(self):
        """End-to-end UI smoke test: clear the app, walk past the intro
        screens, search for an artist and assert a known result appears.
        """
        logger.info("runTest")
        d.app_clear(self.package_name)
        s = d.session(self.package_name)
        s.set_fastinput_ime(True)  # fast-input IME so send_keys types reliably

        xp = d.ext_xpath
        xp._d = s  # NOTE(review): pokes a private attr so xpath ops use this session

        # Auto-dismiss pop-up dialogs whenever they appear.
        xp.when("跳过").click()
        xp.when("允许").click() # system permission dialog
        # xp.when("@com.tencent.ibg.joox:id/btn_dismiss").click()

        xp("立即体验").click()
        logger.info("Search")
        xp("搜索").click()
        s.send_keys("周杰伦")
        s.send_action("search")
        # The expected song title must show up in the search results.
        self.assertTrue(xp("布拉格广场").wait())
        # xp("@com.tencent.ibg.joox:id/search_area").click()
        # xp("@com.tencent.ibg.joox:id/searchItem").click()
        # s.send_keys("One Call Away")
        # s.send_action("search")
示例9
def validate(ctx: click.Context, source: str,
             no_verify_tls: bool = False) -> Experiment:
    """Validate the experiment at SOURCE."""
    settings = load_settings(ctx.obj["settings_path"])

    # Loading can fail independently of validation; exit early if it does.
    try:
        experiment = load_experiment(
            source, settings, verify_tls=not no_verify_tls)
    except InvalidSource as err:
        logger.error(str(err))
        logger.debug(err)
        ctx.exit(1)

    # Run the semantic validation, notifying listeners of start/end/failure.
    try:
        notify(settings, ValidateFlowEvent.ValidateStarted, experiment)
        ensure_experiment_is_valid(experiment)
        notify(settings, ValidateFlowEvent.ValidateCompleted, experiment)
        logger.info("experiment syntax and semantic look valid")
    except ChaosException as err:
        notify(settings, ValidateFlowEvent.ValidateFailed, experiment, err)
        logger.error(str(err))
        logger.debug(err)
        ctx.exit(1)

    return experiment
示例10
def discover(ctx: click.Context, package: str,
             discovery_path: str = "./discovery.json",
             no_system_info: bool = False,
             no_install: bool = False) -> Discovery:
    """Discover capabilities and experiments."""
    settings = load_settings(ctx.obj["settings_path"])

    try:
        notify(settings, DiscoverFlowEvent.DiscoverStarted, package)
        discovery = disco(
            package_name=package, discover_system=not no_system_info,
            download_and_install=not no_install)
    except DiscoveryFailed as err:
        # Report the failure and bail out without writing anything.
        notify(settings, DiscoverFlowEvent.DiscoverFailed, package, err)
        logger.debug("Failed to discover {}".format(package), exc_info=err)
        logger.fatal(str(err))
        return

    # Persist the discovery outcome for later use.
    with open(discovery_path, "w") as outcome:
        outcome.write(json.dumps(discovery, indent=2, default=encoder))
    logger.info("Discovery outcome saved in {p}".format(p=discovery_path))

    notify(settings, DiscoverFlowEvent.DiscoverCompleted, discovery)
    return discovery
示例11
def line_measurement(self, image, thermal_np, cmap=cv.COLORMAP_JET):
        """Interactively draw a line on the image and plot the temperature
        profile along it.

        Args:
            image: rendered image to draw on (a copy is modified).
            thermal_np: 2-D array of per-pixel temperatures, indexed [y, x].
            cmap: OpenCV colormap `image` was rendered with (default JET).
        """
        img = image.copy()
        # Let the user pick the two line endpoints on the displayed image.
        line, point1, point2 = CFlir.get_line(img)
        line_temps = np.zeros(len(line))

        if len(img.shape) == 3:
            # Invert the colormap: build a BGR -> gray lookup table and map
            # the colour image back to intensity values for display.
            gray_values = np.arange(256, dtype=np.uint8)
            color_values = map(tuple, cv.applyColorMap(gray_values, cmap).reshape(256, 3))
            color_to_gray_map = dict(zip(color_values, gray_values))
            # NOTE(review): raises KeyError for pixels whose BGR value is not
            # exactly on the colormap — assumes `image` was produced by cmap.
            img = np.apply_along_axis(lambda bgr: color_to_gray_map[tuple(bgr)], 2, image)

        # Sample the temperature at every pixel on the line (points are (x, y)).
        for i in range(0,len(line)):
            line_temps[i] = thermal_np[ line[i][1], line[i][0] ]

        cv.line(img, point1, point2, 255, 2, 8)

        # Side-by-side: the image with the line, and the temperature profile.
        plt.subplot(1, 5, (1,2) )
        plt.imshow(img, cmap='jet')
        plt.title('Image')
        plt.subplot(1, 5, (4,5) )
        plt.plot(line_temps)
        plt.title('Distance vs Temperature')
        plt.show()

        logger.info(f'\nMin line: {np.amin(line_temps)}\nMax line: {np.amax(line_temps)}' )
示例12
def __init__(self, args, train=True):
        """Set up the output directory and route logzero output to a file.

        Args:
            args: argparse.Namespace; for inference it must carry
                ``args.model`` (path to the model file).
            train: True for training (creates a fresh output dir and
                ``train.log``); False for inference (logs next to the model).
        """
        self.args = args  # argparse object
        self.logger = logger
        self.start_time = datetime.today()
        self.config = None  # only used for the inference

        if train:  # for training
            self.output_dir = self._return_output_dir()
            self.create_output_dir()
            log_filename = 'train.log'
        else:  # for inference
            self.output_dir = os.path.dirname(args.model)
            self.model_name = os.path.basename(args.model)
            log_filename = 'inference_{}.log'.format(self.model_name)

        log_name = os.path.join(self.output_dir, log_filename)
        # Route all subsequent logzero records to this file as well.
        logzero.logfile(log_name)
        self.log_name = log_name
        self.logger.info('Log filename: [{}]'.format(log_name))
示例13
def dump_git_info(self):
        """
        Log the current git commit header and any uncommitted diffs.

        No-op (apart from a warning) when the cwd is not a git repository.
        """
        # `git rev-parse` exits 0 only inside a git repository.
        if os.system('git rev-parse 2> /dev/null > /dev/null') == 0:
            self.logger.info('Git repository is found. Dumping logs & diffs...')
            # First 7 lines of `git log --pretty=fuller` = latest commit header.
            git_log = '\n'.join(
                l for l in
                subprocess.check_output('git log --pretty=fuller | head -7', shell=True).decode('utf8').split('\n') if
                l)
            self.logger.info(git_log)

            git_diff = subprocess.check_output('git diff', shell=True).decode('utf8')
            self.logger.info(git_diff)
        else:
            # Logger.warn is a deprecated alias; use warning().
            self.logger.warning('Git repository is not found. Continue...')
示例14
def setup_default_logger(logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0, disableStderrLogger=False):
    """
    Deprecated. Use `logzero.loglevel(..)`, `logzero.logfile(..)`, etc.

    Globally reconfigures the default `logzero.logger` instance.

    Usage:

    .. code-block:: python

        from logzero import logger, setup_default_logger
        setup_default_logger(level=logging.WARN)
        logger.info("hello")  # this will not be displayed anymore because minimum loglevel was set to WARN

    :arg string logfile: If set, also write logs to the specified filename.
    :arg int level: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ to display (default: `logging.DEBUG`).
    :arg Formatter formatter: `Python logging Formatter object <https://docs.python.org/2/library/logging.html#formatter-objects>`_ (by default uses the internal LogFormatter).
    :arg int maxBytes: Size of the logfile when rollover should occur. Defaults to 0, rollover never occurs.
    :arg int backupCount: Number of backups to keep. Defaults to 0, rollover never occurs.
    :arg bool disableStderrLogger: Should the default stderr logger be disabled. Defaults to False.
    """
    global logger
    # Bug fix: maxBytes/backupCount were documented but never forwarded to
    # setup_logger, so file rollover silently never happened.
    logger = setup_logger(name=LOGZERO_DEFAULT_LOGGER, logfile=logfile, level=level, formatter=formatter, maxBytes=maxBytes, backupCount=backupCount, disableStderrLogger=disableStderrLogger)
    return logger
示例15
def run_triage_worker(numbers):
    """Run AnsibleTriage over the given issue/PR numbers in this process.

    The numbers are serialized to a temporary JSON file whose path is
    handed to the triager via ``--id``; the file is removed afterwards.

    Returns:
        str: path of the (already removed) temp file, for bookkeeping.
    """
    thispid = os.getpid()
    logger.info('%s started with %s numbers' % (str(thispid), len(numbers)))

    tfh, tfn = tempfile.mkstemp(suffix='.json')
    # mkstemp returns an open fd; close it so it is not leaked — the file is
    # re-opened by name just below.
    os.close(tfh)
    logger.info('%s %s' % (thispid, tfn))

    with open(tfn, 'w') as f:
        f.write(json.dumps(numbers))

    # Forward our own CLI args, plus the id file, to the triager.
    args = sys.argv[1:]
    args.append('--id=%s' % tfn)
    logger.info(args)

    triager = AnsibleTriage(args=args, update_checkouts=False)
    triager.run()

    os.remove(tfn)
    return tfn
示例16
def setup_default_logger(logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0):
    """
    Deprecated. Use `logzero.loglevel(..)`, `logzero.logfile(..)`, etc.

    Globally reconfigures the default `logzero.logger` instance.

    Usage:

    .. code-block:: python

        from logzero import logger, setup_default_logger
        setup_default_logger(level=logging.WARN)
        logger.info("hello")  # this will not be displayed anymore because minimum loglevel was set to WARN

    :arg string logfile: If set, also write logs to the specified filename.
    :arg int level: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ to display (default: `logging.DEBUG`).
    :arg Formatter formatter: `Python logging Formatter object <https://docs.python.org/2/library/logging.html#formatter-objects>`_ (by default uses the internal LogFormatter).
    :arg int maxBytes: Size of the logfile when rollover should occur. Defaults to 0, rollover never occurs.
    :arg int backupCount: Number of backups to keep. Defaults to 0, rollover never occurs.
    """
    global logger
    # Bug fix: maxBytes/backupCount were documented but never forwarded to
    # setup_logger, so file rollover silently never happened.
    logger = setup_logger(name=LOGZERO_DEFAULT_LOGGER, logfile=logfile, level=level, formatter=formatter, maxBytes=maxBytes, backupCount=backupCount)
    return logger
示例17
async def wda_status(self):
        """Ping WebDriverAgent's /status endpoint and cache the result.

        Declared ``async`` because the body awaits the HTTP fetch; the
        original plain ``def`` containing ``await`` was a SyntaxError.

        Returns:
            dict or None: parsed status payload, or None on any failure.
        """
        try:
            request = httpclient.HTTPRequest(self.wda_device_url + "/status",
                                             connect_timeout=3,
                                             request_timeout=15)
            client = httpclient.AsyncHTTPClient()
            resp = await client.fetch(request)
            info = json.loads(resp.body)
            self.__wda_info = info
            return info
        except httpclient.HTTPError as e:
            logger.debug("%s request wda/status error: %s", self, e)
            return None
        except (ConnectionResetError, ConnectionRefusedError):
            # WDA is not up yet; keep polling quietly.
            logger.debug("%s waiting for wda", self)
            return None
        except Exception as e:
            logger.warning("%s ping wda unknown error: %s %s", self, type(e),
                           e)
            return None
示例18
async def _drain_queue(self):
        """
        Forward queued messages to the server and mirror them in the local db.

        Logic:
            - send message to server when server is alive
            - update local db

        Declared ``async`` because the body awaits queue/websocket ops; the
        original plain ``def`` containing ``await`` was a SyntaxError.
        """
        while True:
            message = await self._queue.get()
            if message is None:
                # Wake-up signal: replay everything stored in the local db.
                logger.info("Resent messages: %s", self._db)
                for _, v in self._db.items():
                    await self._ws.write_message(v)
                continue

            if 'udid' in message:  # ping messages do not carry a udid
                udid = message['udid']
                update_recursive(self._db, {udid: message})
            self._queue.task_done()

            if self._ws:
                try:
                    await self._ws.write_message(message)
                    logger.debug("websocket send: %s", message)
                except TypeError as e:
                    logger.info("websocket write_message error: %s", e)
示例19
async def _connect(self):
        """Open the websocket to the server and perform the handshake.

        Declared ``async`` because the body awaits connect/read/write; the
        original plain ``def`` containing ``await`` was a SyntaxError.
        """
        ws = await websocket.websocket_connect(self._ws_url, ping_interval=3)
        ws.__class__ = SafeWebSocket

        await ws.write_message({
            "command": "handshake",
            "name": self._name,
            "owner": self._owner,
            "secret": self._secret,
            "url": self._provider_url,
            "priority": self._priority,  # the larger, the more important
        })

        msg = await ws.read_message()
        logger.info("WS receive: %s", msg)
        return ws
示例20
def discover(discover_system: bool = True) -> Discovery:
    """
    Discover AWS capabilities from this extension as well, if a aws
    configuration is available, some information about the AWS environment.
    """
    logger.info("Discovering capabilities from chaostoolkit-aws")

    # Seed the discovery document, then attach this extension's activities.
    result = initialize_discovery_result(
        "chaostoolkit-aws", __version__, "aws")
    result["activities"].extend(load_exported_activities())

    return result


###############################################################################
# Private functions
############################################################################### 
示例21
def download_google_file(self, string):
        """Download the GCS blob named by ``string`` into a local workspace.

        Uses ``self.tempdir`` when set, else ``$HOME/bulk-download``;
        lazily creates a module-level storage client using
        application-default credentials.
        """
        default_workspace = os.path.join(os.environ['HOME'], 'bulk-download')
        # Idiomatic identity check instead of `!= None`.
        if self.tempdir is not None:
            workspace = self.tempdir
        else:
            workspace = default_workspace
        if not os.path.exists(workspace):
            logger.info("Creating directory: {}".format(workspace))
            os.makedirs(workspace)
        # Note: this will only work on the cloud.
        # Outside the cloud you could authenticate with
        # `gcloud auth application-default login`, but that is not a good
        # idea to do as yourself — use a service account if you must.
        # See: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
        global storage_client
        if storage_client is None:
            logger.info("Getting storage client")
            import google.auth
            from google.cloud import storage
            credentials, project = google.auth.default()
            storage_client = storage.Client(credentials=credentials, project=project)
        return self.download_blob(string, storage_client, workspace)
示例22
def get_observations_from_delphiki():
    """Load cached observations from the results dir given as argv[1].

    Reads ``<argv[1]>/.cache/observations.json``.

    Raises:
        Exception: if the cache file does not exist.
    """
    dn = sys.argv[1]
    fn = os.path.join(dn, '.cache', 'observations.json')
    if not os.path.exists(fn):
        raise Exception('%s does not exist' % fn)
    logger.info('reading %s' % fn)
    with open(fn, 'r') as f:
        # json.load streams the file directly instead of loads(f.read()).
        obs = json.load(f)
    logger.info('%s observations found' % len(obs))
    return obs
示例23
def sc_notify(event):
    """Handle a SmartContract Runtime.Notify event and log its first payload
    element, assuming it is a utf-8 string."""
    logger.info("SmartContract Runtime.Notify event: %s", event)

    payload = event.event_payload
    # Only handle non-empty Array payloads.
    if (not isinstance(payload, ContractParameter)
            or payload.Type != ContractParameterType.Array
            or not len(payload.Value)):
        return

    # As the contract developer you know the payload encoding; here the
    # first element is assumed to be a utf-8 string.
    logger.info("- payload part 1: %s", payload.Value[0].Value.decode("utf-8"))


#
# Custom code that runs in the background
# 
示例24
def main():
    """Entry point: run the event loop until SIGINT triggers SystemExit."""
    loop = asyncio.get_event_loop()

    # because a KeyboardInterrupt is so violent it can shutdown the DB in an
    # unpredictable state, convert SIGINT into a clean SystemExit instead.
    loop.add_signal_handler(SIGINT, system_exit)

    task = loop.create_task(setup_and_start(loop))

    try:
        loop.run_forever()
    except SystemExit:
        logger.info("Shutting down...")

        # Stop the site returned by setup_and_start first.
        site = task.result()
        loop.run_until_complete(site.stop())

        # Then bring down the P2P layer and run the remaining cleanup.
        network = NetworkService()
        loop.run_until_complete(network.shutdown())
        loop.run_until_complete(shutdown())
        loop.stop()
    finally:
        loop.close()

    logger.info("Closing databases...")
    Blockchain.Default().Dispose()
示例25
def setup_and_start(loop):
    """Configure TestNet, start blockchain + network services, and return
    the background task."""
    # Use TestNet
    settings.setup_testnet()

    # Register the LevelDB-backed chain as the default blockchain.
    chain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(chain)

    node = NetworkService()
    loop.create_task(node.start())
    background = loop.create_task(custom_background_code())

    # Disable smart contract events for external smart contracts
    settings.set_log_smart_contract_events(False)

    # Run all the things (blocking call)
    logger.info("Everything setup and running. Waiting for events...")
    return background
示例26
def main():
    """Entry point: run the event loop until SIGINT triggers SystemExit."""
    loop = asyncio.get_event_loop()

    # because a KeyboardInterrupt is so violent it can shutdown the DB in an
    # unpredictable state, convert SIGINT into a clean SystemExit instead.
    loop.add_signal_handler(SIGINT, system_exit)
    task = loop.create_task(setup_and_start(loop))

    try:
        loop.run_forever()
    except SystemExit:
        logger.info("Shutting down...")
        # Bring down the P2P layer, then run the remaining cleanup.
        network = NetworkService()
        loop.run_until_complete(network.shutdown())
        loop.run_until_complete(shutdown())
        loop.stop()
    finally:
        loop.close()

    Blockchain.Default().Dispose()
示例27
def discover(discover_system: bool = True) -> Discovery:
    """
    Discover Kubernetes capabilities offered by this extension.
    """
    logger.info("Discovering capabilities from chaostoolkit-kubernetes")

    # Seed the discovery document, then attach this extension's activities.
    result = initialize_discovery_result(
        "chaostoolkit-kubernetes", __version__, "kubernetes")
    result["activities"].extend(load_exported_activities())
    return result


###############################################################################
# Private functions
############################################################################### 
示例28
def setup_logging():
    """Initialise Sentry with logging integration."""
    breadcrumbs = LoggingIntegration(
        level=logging.INFO,  # Capture info and above as breadcrumbs
        event_level=logging.WARNING,  # Send errors as events
    )
    sentry_sdk.init(
        settings.SENTRY_URL,
        integrations=[breadcrumbs],
        environment=settings.SENTRY_ENVIRONMENT,
    )
示例29
def download_session(session_name: str, output_path: Path) -> str:
    """Download a session blob from object storage into `output_path`.

    The name is normalised to end with exactly one ".session" suffix.
    Returns the local file path as a string.
    """
    session_name = session_name.replace(".session", "") + ".session"
    blob = client.get_object(BUCKET_NAME, session_name)
    out_path = str(output_path / session_name)
    # Stream the object to disk in 32 KiB chunks.
    with open(out_path, 'wb') as fh:
        for chunk in blob.stream(32 * 1024):
            fh.write(chunk)
    log.info(f"Downloaded session '{session_name}' to '{output_path}'.")
    return out_path
示例30
def disable_decider(bot: TelegramBot, to_check: BotModel):
    """Decide whether to disable an offline bot or re-enable a recovered one.

    A bot offline longer than DISABLE_BOT_INACTIVITY_DELTA (and not already
    disabled for being offline) gets disabled and announced; a bot that is
    online again while disabled-for-offline gets re-enabled and announced.
    Banned bots must never reach this function (asserted below).
    """
    assert to_check.disabled_reason != BotModel.DisabledReason.banned

    if (
            to_check.offline and
            to_check.offline_for > settings.DISABLE_BOT_INACTIVITY_DELTA and
            to_check.disabled_reason != BotModel.DisabledReason.offline
    ):
        # Disable if the bot has been offline for too long
        if to_check.disable(to_check.DisabledReason.offline):
            to_check.save()

            # Prefer a human-friendly "last seen" phrase when available.
            if to_check.last_response:
                reason = "its last response was " + helpers.slang_datetime(to_check.last_response)
            else:
                reason = "it's been offline for.. like... ever"

            msg = "❌ {} disabled as {}.".format(to_check, reason)
            log.info(msg)
            bot.send_message(settings.BOTLIST_NOTIFICATIONS_ID, msg, timeout=30,
                             parse_mode='markdown')
        else:
            # disable() refused although the preconditions held
            log.info("huhwtf")
    elif (
            to_check.online and
            to_check.disabled_reason == BotModel.DisabledReason.offline
    ):
        # Re-enable if the bot is disabled and came back online
        if to_check.enable():
            to_check.save()
            msg = "{} was included in the @BotList again as it came back online.".format(to_check)
            log.info(msg)
            bot.send_message(settings.BOTLIST_NOTIFICATIONS_ID, msg, timeout=30,
                             parse_mode='markdown')
        else:
            # enable() unexpectedly failed
            log.info("huhwtf")