Python source code examples: six.moves.queue.get()
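Before the examples, a quick orientation: six.moves.queue resolves to the stdlib Queue module on Python 2 and queue on Python 3, so the same get() call runs unchanged on both. By default get() blocks until an item is available. A minimal sketch of the common call forms (independent of the examples below):

    from six.moves import queue

    q = queue.Queue()
    q.put("item")
    print(q.get())        # blocks until an item is available
    try:
        q.get(timeout=1)  # waits at most one second, then raises
    except queue.Empty:
        print("nothing arrived in time")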
Example 1
def __iter__(self):
    ds_itr = _repeat_iter(self.ds.get_data)
    with self._guard:
        while True:
            # Draw one datapoint per worker process.
            dps = []
            for k in range(self.nr_proc):
                dps.append(copy.copy(next(ds_itr)))
            to_map = [x[self.index] for x in dps]
            res = self._pool.map_async(_pool_map, to_map)
            # Each worker writes its result into its shared-memory slot and
            # returns its worker index; res.get() blocks until all are done.
            for index in res.get():
                if index is None:
                    continue
                arr = np.reshape(self._shared_mem[index], self.output_shape)
                dp = dps[index]
                dp[self.index] = arr.copy()
                yield dp
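The map_async/res.get() pair above is the standard multiprocessing batching idiom: get() blocks until every submitted item has been processed. A self-contained sketch of just that idiom, with a hypothetical square() worker in place of _pool_map:

    import multiprocessing as mp

    def square(x):
        return x * x

    if __name__ == "__main__":
        pool = mp.Pool(processes=4)
        res = pool.map_async(square, [1, 2, 3, 4])
        print(res.get())  # blocks until all results are in: [1, 4, 9, 16]
        pool.close()
        pool.join()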
Example 2
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)  # bounded queue: holds at most two batches
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    # Blocks until the worker process has produced the first batch.
    batch_img, batch_voxel = data_queue.get()
    kill_processes(data_queue, [data_process])
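The test exercises the basic multiprocessing hand-off: the parent blocks in data_queue.get() until the child process has put a batch. The same hand-off stripped to its core, with producer() as a hypothetical stand-in for ReconstructionDataProcess:

    from multiprocessing import Process, Queue

    def producer(q):
        q.put(("images", "voxels"))  # stand-in for a real training batch

    if __name__ == "__main__":
        q = Queue(2)  # bounded: put() blocks once two batches are waiting
        p = Process(target=producer, args=(q,))
        p.start()
        batch_img, batch_voxel = q.get()  # blocks until the child has put
        p.join()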
Example 3
def _request_wrapper(self, queue, url, params, timeout):
    """
    Wrapper around requests, run by each thread.

    Parameters
    ----------
    queue : Queue.Queue
        The Queue to write the response from the request into.
    url : str
        The URL to be queried.
    params : dict
        A dictionary of parameters to pass to the request.
    timeout : int
        Timeout in seconds to wait for a response to the request.
    """
    response = self.session.get(url, params=params, verify=self.verify, timeout=timeout)
    queue.put(response)
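A wrapper like this is typically run on a worker thread, with the caller blocking on the queue for the first response. A sketch of such a driver, assuming the requests package and network access (fetch_one is a hypothetical stand-in without the session state):

    import threading
    import requests
    from six.moves import queue

    def fetch_one(q, url):
        q.put(requests.get(url, timeout=5))

    q = queue.Queue()
    t = threading.Thread(target=fetch_one, args=(q, "https://example.com"))
    t.start()
    response = q.get(timeout=10)  # raises queue.Empty if nothing arrives
    t.join()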
Example 4
def receive(self, conn_name=(None, None), timeout=30):
    """
    Receive a FIX message from the given connection.

    The connection name defaults to ``(None, None)``. In this case, the
    server will try to find the one and only available connection. This
    fails if more than one connection is available, or if the initial
    connection is no longer active.

    :param conn_name: Connection name to receive the message from.
    :type conn_name: ``tuple`` of ``str`` and ``str``
    :param timeout: Timeout in seconds.
    :type timeout: ``int``

    :return: FIX message received.
    :rtype: ``FixMessage``
    """
    conn_name = self._validate_connection_name(conn_name)
    return self._conndetails_by_name[conn_name].queue.get(True, timeout)
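queue.get(True, timeout) blocks for at most timeout seconds and raises queue.Empty if nothing arrives, so callers of receive() usually translate that into a domain-specific error. A sketch (receive_or_fail is hypothetical):

    from six.moves import queue

    def receive_or_fail(q, timeout=30):
        try:
            return q.get(True, timeout)
        except queue.Empty:
            raise RuntimeError("no FIX message within %s seconds" % timeout)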
Example 5
def _recv(self):
    return self._out_queue.get()
Example 6
def __init__(self, ds, nr_proc, map_func, output_shape, output_dtype, index=0):
    """
    Args:
        ds (DataFlow): the dataflow to map on
        nr_proc (int): number of processes
        map_func (data component -> ndarray | None): the mapping function
        output_shape (tuple): the shape of the output of map_func
        output_dtype (np.dtype): the type of the output of map_func
        index (int): the index of the datapoint component to map on.
    """
    self.ds = ds
    self.nr_proc = nr_proc
    self.map_func = map_func
    self.output_shape = output_shape
    self.output_dtype = np.dtype(output_dtype).type
    self.index = index

    self._shared_mem = [self._create_shared_arr() for k in range(nr_proc)]
    id_queue = mp.Queue()
    for k in range(nr_proc):
        id_queue.put(k)

    def _init_pool(arrs, queue, map_func):
        # Runs once in each freshly spawned worker: pop a unique id and
        # claim the matching shared-memory array.
        id = queue.get()
        global SHARED_ARR, WORKER_ID, MAP_FUNC
        SHARED_ARR = arrs[id]
        WORKER_ID = id
        MAP_FUNC = map_func

    self._pool = mp.pool.Pool(
        processes=nr_proc,
        initializer=_init_pool,
        initargs=(self._shared_mem, id_queue, map_func))
    self._guard = DataFlowReentrantGuard()
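The id_queue hand-off is worth isolating: the queue holds exactly nr_proc ids and each worker's initializer pops one, so every worker claims a distinct shared array. The same trick demonstrated with a plain id (which_worker is a hypothetical probe):

    import multiprocessing as mp

    def _init_worker(id_queue):
        global WORKER_ID
        WORKER_ID = id_queue.get()  # each worker pops a unique id once

    def which_worker(_):
        return WORKER_ID

    if __name__ == "__main__":
        ids = mp.Queue()
        for k in range(3):
            ids.put(k)
        pool = mp.Pool(processes=3, initializer=_init_worker, initargs=(ids,))
        print(sorted(set(pool.map(which_worker, range(30)))))  # typically [0, 1, 2]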
Example 7
def kill_processes(queue, processes):
    print('Signal processes')
    for p in processes:
        p.shutdown()

    # Drain leftover items so no child stays blocked on a put() into a
    # full queue.
    print('Empty queue')
    while not queue.empty():
        time.sleep(0.5)
        queue.get(False)

    print('Kill processes')
    for p in processes:
        p.terminate()
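Draining before terminate() is deliberate: a child blocked in put() on a full multiprocessing.Queue cannot make progress, and the multiprocessing documentation warns that terminating a process while it is using a queue can leave the queue in a corrupted state. Emptying the queue first lets producers finish their last put() before they are killed.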
Example 8
def data_resources(self):
    """
    List of all available data resources, meaning resources that return data.
    """
    if self._data_resources:
        return list(self._data_resources.keys())
    data_resources_url = six.moves.urllib.parse.urljoin(self.base_url, "data.json")
    response = self.session.get(data_resources_url, verify=self.verify)
    response_json = self._parse(response)
    for entry in response_json["queryResponse"]["entityType"]:
        self._data_resources[entry["$"]] = "%s.json" % entry["@url"]
    return list(self._data_resources.keys())
Example 9
def service_resources(self):
    """
    List of all available service resources, meaning resources that modify the NMS.
    """
    if self._service_resources:
        return list(self._service_resources.keys())
    service_resources_url = six.moves.urllib.parse.urljoin(self.base_url, "op.json")
    response = self.session.get(service_resources_url, verify=self.verify)
    response_json = self._parse(response)
    for entry in response_json["queryResponse"]["operation"]:
        self._service_resources[entry["$"]] = {
            "method": entry["@httpMethod"],
            "url": six.moves.urllib.parse.urljoin(self.base_url, "op/%s.json" % entry["@path"]),
        }
    return list(self._service_resources.keys())
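Both resource listings lean on six.moves.urllib.parse, which maps to urlparse on Python 2 and urllib.parse on Python 3. The urljoin behavior they depend on, shown with a made-up base URL:

    from six.moves.urllib.parse import urljoin

    print(urljoin("https://nms.example.com/webacs/api/v1/", "op.json"))
    # -> https://nms.example.com/webacs/api/v1/op.json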
Example 10
def _flush_queue(self, queue):
    """
    Flush the given receive queue.

    :param queue: Queue to flush.
    :type queue: ``Queue.Queue``
    """
    # The parameter shadows the queue module, so the Empty exception must
    # be imported explicitly rather than referenced as queue.Empty.
    from six.moves.queue import Empty
    try:
        while True:
            queue.get(False)
    except Empty:
        return
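get(False) is equivalent to get_nowait(): both return immediately and raise Empty when nothing is buffered, which is what terminates the flush loop. In isolation:

    from six.moves import queue

    q = queue.Queue()
    try:
        q.get_nowait()  # same as q.get(False)
    except queue.Empty:
        pass  # expected: nothing was buffered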