Python source code examples: six.moves.queue.Queue()

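All of the examples below rely on six's compatibility layer: six.moves.queue resolves to the standard-library Queue module on Python 2 and queue on Python 3, so the same import works unchanged on both. A minimal, self-contained sketch of that pattern (the queue size and payload are illustrative, not taken from the examples):

from six.moves import queue

q = queue.Queue(maxsize=8)   # thread-safe FIFO queue, bounded to 8 items
q.put("item")                # blocks if the queue is full
try:
    item = q.get_nowait()    # raises queue.Empty instead of blocking
except queue.Empty:
    item = None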
Example 1
def reset_state(self):
        super(MultiThreadMapData, self).reset_state()
        if self._threads:
            self._threads[0].stop()
            for t in self._threads:
                t.join()

        self._in_queue = queue.Queue()
        self._out_queue = queue.Queue()
        self._evt = threading.Event()
        self._threads = [MultiThreadMapData._Worker(
            self._in_queue, self._out_queue, self._evt, self.map_func)
            for _ in range(self.num_thread)]
        for t in self._threads:
            t.start()

        self._guard = DataFlowReentrantGuard()

        # Call once at the beginning, to ensure inq+outq has a total of buffer_size elements
        self._fill_buffer() 
Example 2
def __init__(self, get_df, num_prefetch, num_thread):
        """
        Args:
            get_df ( -> DataFlow): a callable which returns a DataFlow.
                Each thread will call this function to get the DataFlow to use.
                Therefore do not return the same DataFlow object for each call,
                unless your dataflow is stateless.
            num_prefetch (int): size of the queue
            num_thread (int): number of threads
        """
        assert num_thread > 0, num_thread
        assert num_prefetch > 0, num_prefetch
        self.num_thread = num_thread
        self.queue = queue.Queue(maxsize=num_prefetch)
        self.threads = [
            MultiThreadRunner._Worker(get_df, self.queue)
            for _ in range(num_thread)]

        try:
            self._size = self.__len__()
        except NotImplementedError:
            self._size = -1 
Example 3
def setUpClass(self):
        self.projectdb = ProjectDB([os.path.join(os.path.dirname(__file__), 'data_fetcher_processor_handler.py')])
        self.fetcher = Fetcher(None, None, async_mode=False)
        self.status_queue = Queue()
        self.newtask_queue = Queue()
        self.result_queue = Queue()
        self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887, passthrough_errors=False)
        self.httpbin = 'http://127.0.0.1:14887'
        self.proxy_thread = subprocess.Popen(['pyproxy', '--username=binux',
                                              '--password=123456', '--port=14830',
                                              '--debug'], close_fds=True)
        self.proxy = '127.0.0.1:14830'
        self.processor = Processor(projectdb=self.projectdb,
                                   inqueue=None,
                                   status_queue=self.status_queue,
                                   newtask_queue=self.newtask_queue,
                                   result_queue=self.result_queue)
        self.project_name = 'data_fetcher_processor_handler'
        time.sleep(0.5) 
Example 4
def get(self):
        '''Get a task from queue when bucket available'''
        if self.bucket.get() < 1:
            return None
        now = time.time()
        self.mutex.acquire()
        try:
            task = self.priority_queue.get_nowait()
            self.bucket.desc()
        except Queue.Empty:
            self.mutex.release()
            return None
        task.exetime = now + self.processing_timeout
        self.processing.put(task)
        self.mutex.release()
        return task.taskid 
Example 5
def _check_task_done(self):
        '''Check status queue'''
        cnt = 0
        try:
            while True:
                task = self.status_queue.get_nowait()
                # check _on_get_info result here
                if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
                    if task['project'] not in self.projects:
                        continue
                    project = self.projects[task['project']]
                    project.on_get_info(task['track'].get('save') or {})
                    logger.info(
                        '%s on_get_info %r', task['project'], task['track'].get('save', {})
                    )
                    continue
                elif not self.task_verify(task):
                    continue
                self.on_task_status(task)
                cnt += 1
        except Queue.Empty:
            pass
        return cnt 
Example 6
def setup(self, config):
    """
    Establish connection to Elasticsearch cluster and start periodic commit.

    :param config: Configuration object.
    :type config: ``dict``
    """
    self.config = config
    self.context_size = config.get(helper.CONTEXT_SIZE, 120)
    self.elastic_bulk = queue.Queue()
    self.elastic = self.config[helper.INJECTOR].get_elasticsearch()
    self.helper = self.config[helper.INJECTOR].get_elasticsearch_helper()
    self.create_mapping()

    thread = threading.Thread(target=self._commit, args=())
    thread.daemon = True
    thread.start()
    self.thread = thread 
Example 7
def __init__(self, agent, in_queue_size):
        """
        Initializes the worker with a RLGraph agent and queues for

        Args:
            agent (Agent): RLGraph agent used to execute local updates.
            input_queue (queue.Queue): Input queue the worker will use to poll samples.
            output_queue (queue.Queue): Output queue the worker will use to push results of local
                update computations.
        """
        super(UpdateWorker, self).__init__()

        # Agent to use for updating.
        self.agent = agent
        self.input_queue = queue.Queue(maxsize=in_queue_size)
        self.output_queue = queue.Queue()

        # Terminate when host process terminates.
        self.daemon = True

        # Flag for main thread.
        self.update_done = False 
Example 8
def test_response_on_queue(self):
        g = MockGrader()
        pl = self._make_payload({
            'student_response': 'blah',
            'grader_payload': json.dumps({
                'grader': 'correct'
                })
            })
        q = Queue()
        reply = g.process_item(pl, queue=q)
        popped = q.get()
        self.assertEqual(reply, popped)

        del pl['xqueue_body']
        try:
            g.process_item(pl, queue=q)
        except Exception as e:
            popped = q.get()
            self.assertEqual(e, popped) 
Example 9
def __init__(self, rate=RATE, chunk=CHUNK, server_name='127.0.0.1',
                 port=4444):
        self._rate = rate
        self._chunk = chunk
        self._server_name = server_name
        self._port = port

        # Socket for connection
        self.s = None
        self._connected = False

        # Audio data thread to get data from server
        self.data_thread = threading.Thread(target=self._get_server_data)
        self.data_thread.daemon = True

        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True 
Example 10
def setup_client_backend(self):
        """
        Setup backend client thread as daemon

        Returns:
            None

        """
        self.backend_queue = queue.Queue()
        self.backend_stop_event = threading.Event()
        self.setup_client()
        t = threading.Thread(target=self._backend_worker, name="airtouch")
        # t.daemon = True
        t.start()
        self.backend_thread = t
        self.handle = self.backend_queue.put 
Example 11
def __init__(self, predictors, batch_size=5, debug_charts=False, worker_id=None, neptune_client=None):
        """ :param predictors: a list of OnlinePredictor"""
        assert len(predictors)
        for k in predictors:
            #assert isinstance(k, OnlinePredictor), type(k)
            # TODO use predictors.return_input here
            assert k.return_input == False
            #queue_size=len(predictors)*100
            queue_size=len(predictors)*1
        self.input_queue = queue.Queue(maxsize=queue_size)
        self.threads = [
            PredictorWorkerThread(
                self.input_queue, f, id, batch_size=batch_size,
                debug_charts=debug_charts,
                worker_id=worker_id,
                neptune_client=neptune_client)
            for id, f in enumerate(predictors)]

        if six.PY2:
            # TODO XXX set logging here to avoid affecting TF logging
            import tornado.options as options
            options.parse_command_line(['--logging=debug']) 
Example 12
def __init__(self, worker_id, neptune_client, pipe_c2s, pipe_s2c, model, dummy, predictor_threads, predict_batch_size=16, do_train=True):
        # predictor_threads is previous PREDICTOR_THREAD
        super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c, args.simulator_procs, os.getpid())
        self.M = model
        self.do_train = do_train

        # the second queue is here!
        self.queue = queue.Queue(maxsize=args.my_sim_master_queue)
        self.dummy = dummy
        self.predictor_threads = predictor_threads

        self.last_queue_put = start_timer()
        self.queue_put_times = []
        self.predict_batch_size = predict_batch_size
        self.counter = 0

        self.worker_id = worker_id
        self.neptune_client = neptune_client
        self.stats = defaultdict(StatCounter)
        self.games = StatCounter() 
Example 13
def run(self):
        if not self.containers:
            return

        queue = Queue()
        thread_args = queue, self.log_args
        thread_map = build_thread_map(self.containers, self.presenters, thread_args)

        for line in consume_queue(queue, self.cascade_stop):
            remove_stopped_threads(thread_map)

            if not line:
                if not thread_map:
                    # There are no running containers left to tail, so exit
                    return
                # We got an empty line because of a timeout, but there are still
                # active containers to tail, so continue
                continue

            try:
                self.output.write(line)
                self.output.flush()
            except ValueError:
                # ValueError: I/O operation on closed file
                break 
Example 14
def __init__(self, num_threads):
        assert num_threads >= 1
        self.task_queue = Queue.Queue()
        self.result_queues = dict()
        self.num_threads = num_threads
        for idx in range(self.num_threads):
            thread = WorkerThread(self.task_queue)
            thread.daemon = True
            thread.start() 
Example 15
def add_task(self, func, args=()):
        assert hasattr(func, '__call__') # must be a function
        if func not in self.result_queues:
            self.result_queues[func] = Queue.Queue()
        self.task_queue.put((func, args, self.result_queues[func])) 
Example 16
def setUp(self):
        self.tracer = MockTracer()
        self.queue = queue.Queue()
        self.server = Server(tracer=self.tracer, queue=self.queue)
        self.server.start() 
Example 17
def __init__(self, ds, nr_proc, map_func, output_shape, output_dtype, index=0):
        """
        Args:
            ds (DataFlow): the dataflow to map on
            nr_proc(int): number of processes
            map_func (data component -> ndarray | None): the mapping function
            output_shape (tuple): the shape of the output of map_func
            output_dtype (np.dtype): the type of the output of map_func
            index (int): the index of the datapoint component to map on.
        """
        self.ds = ds
        self.nr_proc = nr_proc
        self.map_func = map_func
        self.output_shape = output_shape
        self.output_dtype = np.dtype(output_dtype).type
        self.index = index

        self._shared_mem = [self._create_shared_arr() for k in range(nr_proc)]
        id_queue = mp.Queue()
        for k in range(nr_proc):
            id_queue.put(k)

        def _init_pool(arrs, queue, map_func):
            id = queue.get()
            global SHARED_ARR, WORKER_ID, MAP_FUNC
            SHARED_ARR = arrs[id]
            WORKER_ID = id
            MAP_FUNC = map_func

        self._pool = mp.pool.Pool(
            processes=nr_proc,
            initializer=_init_pool,
            initargs=(self._shared_mem, id_queue, map_func)) 
Example 18
def __init__(self, ds, num_prefetch, num_proc):
        """
        Args:
            ds (DataFlow): input DataFlow.
            num_prefetch (int): size of the queue to hold prefetched datapoints.
                Required.
            num_proc (int): number of processes to use. Required.
        """
        # https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#the-spawn-and-forkserver-start-methods
        if os.name == 'nt':
            logger.warn("MultiProcessRunner does support Windows. \
However, Windows requires more strict picklability on processes, which may \
lead to failure on some of the code.")
        super(MultiProcessRunner, self).__init__(ds)
        try:
            self._size = len(ds)
        except NotImplementedError:
            self._size = -1
        assert num_proc > 0, num_proc
        assert num_prefetch > 0, num_prefetch
        self.num_proc = num_proc
        self.num_prefetch = num_prefetch

        if num_proc > 1:
            logger.info("[MultiProcessRunner] Will fork a dataflow more than once. "
                        "This assumes the datapoints are i.i.d.")

        self.queue = mp.Queue(self.num_prefetch)
        self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
                      for idx in range(self.num_proc)]
        ensure_proc_terminate(self.procs)
        self._reset_done = False 
Example 19
def test_task_queue_in_time_order(self):
        tq = TaskQueue(rate=300, burst=1000)

        queues = dict()
        tasks = dict()

        for i in range(0, 100):
            it = InQueueTask(str(i), priority=int(i // 10), exetime=0)
            tq.put(it.taskid, it.priority, it.exetime)

            if it.priority not in queues:
                queues[it.priority] = Queue.Queue()

            q = queues[it.priority]  # type:Queue.Queue
            q.put(it)
            tasks[it.taskid] = it
            # six.print_('put, taskid=', it.taskid, 'priority=', it.priority, 'exetime=', it.exetime)
        for i in range(0, 100):
            task_id = tq.get()
            task = tasks[task_id]
            q = queues[task.priority]  # type: Queue.Queue
            expect_task = q.get()
            self.assertEqual(task_id, expect_task.taskid)
            self.assertEqual(task.priority, int(9 - i // 10))
            # six.print_('get, taskid=', task.taskid, 'priority=', task.priority, 'exetime=', task.exetime)

        self.assertEqual(tq.size(), 100)
        self.assertEqual(tq.priority_queue.qsize(), 0)
        self.assertEqual(tq.processing.qsize(), 100)
        for q in six.itervalues(queues):  # type:Queue.Queue
            self.assertEqual(q.qsize(), 0)
        pass 
Example 20
def send_task(self, task, force=True):
        '''
        dispatch task to fetcher

        out queue may have size limit to prevent block, a send_buffer is used
        '''
        try:
            self.out_queue.put_nowait(task)
        except Queue.Full:
            if force:
                self._send_buffer.appendleft(task)
            else:
                raise 
Example 21
def _start_threads(self):
        for i in range(self.threads):
            queue = Queue.Queue()
            thread = threading.Thread(target=self._thread_worker, args=(queue, ))
            thread.daemon = True
            thread.start()
            self.thread_objs.append(thread)
            self.thread_queues.append(queue) 
Example 22
def __init__(self, url, protocols=None, extensions=None,
                     ssl_options=None, headers=None):
            """WebSocket client that executes into a eventlet green thread."""
            websocket.WebSocketBaseClient.__init__(self, url, protocols,
                                                   extensions,
                                                   ssl_options=ssl_options,
                                                   headers=headers)
            self._th = threading.Thread(
                target=self.run, name='WebSocketClient')
            self._th.daemon = True

            self.messages = queue.Queue() 
Example 23
def __init__(self, moler_connection, echo=True, name=None, logger_name=""):
        """Initialization of FIFO-mem-threaded connection."""
        super(ThreadedFifoBuffer, self).__init__(moler_connection=moler_connection,
                                                 echo=echo,
                                                 name=name,
                                                 logger_name=logger_name)
        self.pulling_thread = None
        self.injections = Queue() 
Example 24
def multithreaded_tid(func, args_list, num_thread, logfunc=None):
    assert 0 < num_thread
    n = len(args_list)
    args_list = [args if isinstance(args, list) or isinstance(args, tuple)
                 else (args, ) for args in args_list]
    q = queue.Queue()
    for i in range(n):
        q.put(i)
    p = queue.Queue()

    def parallel_work(tid):
        while True:
            try:
                i = q.get(block=False)
                p.put(i)
            except queue.Empty as e:
                return
            func(*args_list[i], tid=tid)
    threads = [threading.Thread(target=parallel_work, args=(i, ))
               for i in range(num_thread)]
    for t in threads:
        t.start()
    if logfunc is not None:
        while 0 < n:
            try:
                logfunc(*args_list[p.get(block=True, timeout=1)])
                n -= 1
            except queue.Empty as e:
                pass
    for t in threads:
        t.join() 
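A hedged usage sketch for the helper above; the worker function, argument list, and log callback are made up for illustration:

def work(x, y, tid=None):
    # tid identifies which worker thread ran this item
    print("thread", tid, "sum", x + y)

def log_done(x, y):
    # called from the main thread once (x, y) has been picked up by a worker
    print("dispatched", x, y)

multithreaded_tid(work, [(1, 2), (3, 4), (5, 6)], num_thread=2, logfunc=log_done)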