
Python multiprocessing.cpu_count: Method Code Examples

Author: 用户7886150 | Last modified 2021-01-07 10:25:39 | Column: bit哲学院


This article collects typical usage examples of Python's multiprocessing.cpu_count method. If you are wondering what exactly multiprocessing.cpu_count does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore other usage examples from the multiprocessing module.

Thirty code examples of multiprocessing.cpu_count are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code samples.
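Before turning to the collected examples, here is a minimal sketch of the method itself (the pool and the square function are illustrative, not taken from any of the projects below): multiprocessing.cpu_count() returns the number of logical CPUs on the machine, and the most common pattern is to size a worker pool with it.

import multiprocessing

def square(x):
    # Trivial picklable task used only for illustration.
    return x * x

if __name__ == '__main__':
    n_cores = multiprocessing.cpu_count()  # number of logical CPUs
    print('Logical CPUs:', n_cores)
    # Common pattern: size a worker pool from the core count.
    with multiprocessing.Pool(processes=n_cores) as pool:
        print(pool.map(square, range(8)))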

Example 1: get_graph_stats (7 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def get_graph_stats(graph_obj_handle, prop='degrees'):
    # if prop == 'degrees':
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i * len(graph_obj_handle) / num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i + 1], prop)
                                     for i in range(num_cores))

    stat_dict = {}
    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
    if 'target_mean' in prop:
        stat_dict['target_mean'] = np.mean(param, axis=0)
    if 'target_std' in prop:
        stat_dict['target_std'] = np.std(param, axis=0)
    return stat_dict

Source: priba / nmp_qc, 22 lines.

Example 2: get_cpuusage (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def get_cpuusage(filename, field_values, which_dict):
    cpuusage_file = open(os.path.join(homepath, datadir, filename))
    lines = cpuusage_file.read().split("\n")
    cpu_dict = {}
    cpu_count = multiprocessing.cpu_count()
    for i in range(0, cpu_count):
        cpucore = "cpu" + str(i)
        cpu_dict[cpucore] = {}
    for eachline in lines:
        tokens_split = eachline.split("=")
        if len(tokens_split) == 1:
            continue
        cpucoresplit = tokens_split[0].split("$")
        cpu_dict[cpucoresplit[0]][cpucoresplit[1]] = float(tokens_split[1])
    totalresult = 0
    for i in range(0, cpu_count):
        cpucore = "cpu" + str(i)
        which_dict["cpu_usage"] = cpu_dict
        Total = (cpu_dict[cpucore]["user"] + cpu_dict[cpucore]["nice"] + cpu_dict[cpucore]["system"]
                 + cpu_dict[cpucore]["idle"] + cpu_dict[cpucore]["iowait"] + cpu_dict[cpucore]["irq"]
                 + cpu_dict[cpucore]["softirq"])
        idle = cpu_dict[cpucore]["idle"] + cpu_dict[cpucore]["iowait"]
        field_values[0] = "CPU"
        result = 1 - round(float(idle / Total), 4)
        totalresult += float(result)
    field_values.append(totalresult * 100)

Source: insightfinder / InsightAgent, 26 lines.
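The per-core arithmetic in this example is the usual /proc/stat-style calculation: total time is the sum of the seven counters, idle time is idle plus iowait, and utilization is 1 - idle/total. A small standalone sketch of just that formula, on made-up counter values:

# Sketch of the utilization formula above, on made-up jiffy counts.
core = {"user": 500.0, "nice": 10.0, "system": 200.0, "idle": 1200.0,
        "iowait": 50.0, "irq": 5.0, "softirq": 5.0}
total = sum(core.values())                # all seven counters
idle = core["idle"] + core["iowait"]      # time spent not working
usage = 1 - round(idle / total, 4)
print('CPU usage: {:.2%}'.format(usage))  # -> CPU usage: 36.55%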

Example 3: train (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def train(env_id, num_timesteps, seed, policy):
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin': ncpu //= 2
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu)
    config.gpu_options.allow_growth = True  # pylint: disable=E1101
    tf.Session(config=config).__enter__()

    env = VecFrameStack(make_atari_env(env_id, 8, seed), 4)
    policy = {'cnn': CnnPolicy, 'lstm': LstmPolicy, 'lnlstm': LnLstmPolicy}[policy]
    ppo2.learn(policy=policy, env=env, nsteps=128, nminibatches=4,
               lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
               ent_coef=.01,
               lr=lambda f: f * 2.5e-4,
               cliprange=lambda f: f * 0.1,
               total_timesteps=int(num_timesteps * 1.1))

Source: Hwhitetooth / lirpg, 20 lines.

Example 4: scrape_recipe_box (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def scrape_recipe_box(scraper, site_str, page_iter, status_interval=50):
    if args.append:
        recipes = quick_load(site_str)
    else:
        recipes = {}
    start = time.time()
    if args.multi:
        pool = Pool(cpu_count() * 2)
        results = pool.map(scraper, page_iter)
        for r in results:
            recipes.update(r)
    else:
        for i in page_iter:
            recipes.update(scraper(i))
            if i % status_interval == 0:
                print('Scraping page {} of {}'.format(i, max(page_iter)))
                quick_save(site_str, recipes)
            time.sleep(args.sleep)
    print('Scraped {} recipes from {} in {:.0f} minutes'.format(
        len(recipes), site_str, (time.time() - start) / 60))
    quick_save(site_str, recipes)

Source: rtlee9 / recipe-box, 25 lines.

Example 5: test_multiprocessing (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def test_multiprocessing(app):
    """Tests that the number of children we produce is correct"""
    # Selects a number at random so we can spot check
    num_workers = random.choice(range(2, multiprocessing.cpu_count() * 2 + 1))
    process_list = set()

    def stop_on_alarm(*args):
        for process in multiprocessing.active_children():
            process_list.add(process.pid)
            process.terminate()

    signal.signal(signal.SIGALRM, stop_on_alarm)
    signal.alarm(3)
    app.run(HOST, PORT, workers=num_workers)

    assert len(process_list) == num_workers

Source: huge-success / sanic, 18 lines.

Example 6: test_multiprocessing_with_blueprint (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def test_multiprocessing_with_blueprint(app):
    # Selects a number at random so we can spot check
    num_workers = random.choice(range(2, multiprocessing.cpu_count() * 2 + 1))
    process_list = set()

    def stop_on_alarm(*args):
        for process in multiprocessing.active_children():
            process_list.add(process.pid)
            process.terminate()

    signal.signal(signal.SIGALRM, stop_on_alarm)
    signal.alarm(3)

    bp = Blueprint("test_text")
    app.blueprint(bp)
    app.run(HOST, PORT, workers=num_workers)

    assert len(process_list) == num_workers

# this function must be outside a test function so that it can be
# able to be pickled (local functions cannot be pickled).

Source: huge-success / sanic, 24 lines.

Example 7: load_config (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def load_config(config_data):
    config_data['pywren']['runtime'] = RUNTIME_NAME_DEFAULT
    config_data['pywren']['runtime_memory'] = None
    if 'runtime_timeout' not in config_data['pywren']:
        config_data['pywren']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT
    if 'storage_backend' not in config_data['pywren']:
        config_data['pywren']['storage_backend'] = 'localhost'
    if 'localhost' not in config_data:
        config_data['localhost'] = {}
    if 'ibm_cos' in config_data and 'private_endpoint' in config_data['ibm_cos']:
        del config_data['ibm_cos']['private_endpoint']
    if 'workers' in config_data['pywren']:
        config_data['localhost']['workers'] = config_data['pywren']['workers']
    else:
        total_cores = multiprocessing.cpu_count()
        config_data['pywren']['workers'] = total_cores
        config_data['localhost']['workers'] = total_cores

Source: pywren / pywren-ibm-cloud, 23 lines.

Example 8: get_params_for_mp (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def get_params_for_mp(n_triples):
    n_cores = mp.cpu_count()
    pool = mp.Pool(n_cores)
    avg = n_triples // n_cores

    range_list = []
    start = 0
    for i in range(n_cores):
        num = avg + 1 if i < n_triples - avg * n_cores else avg
        range_list.append([start, start + num])
        start += num

    return n_cores, pool, range_list

# input: [(h1, {t1, t2 ...}), (h2, {t3 ...}), ...]
# output: {(h1, t1): paths, (h1, t2): paths, (h2, t3): paths, ...}

Source: hwwang55 / PathCon, 19 lines.
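The loop above spreads the division remainder over the first few cores, so every range differs in size by at most one item. A standalone rework of just the partitioning logic (pool omitted, names chosen here for illustration), with a worked example:

# Sketch of the partitioning logic above, without creating a pool.
def partition(n_items, n_cores):
    avg = n_items // n_cores
    range_list, start = [], 0
    for i in range(n_cores):
        # the first (n_items - avg * n_cores) cores take one extra item
        num = avg + 1 if i < n_items - avg * n_cores else avg
        range_list.append([start, start + num])
        start += num
    return range_list

print(partition(10, 4))  # -> [[0, 3], [3, 6], [6, 8], [8, 10]]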

Example 9: cpu_count (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def cpu_count():
    """Return the number of CPU cores."""
    try:
        return multiprocessing.cpu_count()
    # TODO: remove except clause once we support only python >= 2.6
    except NameError:
        ## This code part is taken from parallel python.
        # Linux, Unix and MacOS
        if hasattr(os, "sysconf"):
            if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
                # Linux & Unix
                n_cpus = os.sysconf("SC_NPROCESSORS_ONLN")
                if isinstance(n_cpus, int) and n_cpus > 0:
                    return n_cpus
            else:
                # OSX
                return int(os.popen2("sysctl -n hw.ncpu")[1].read())
        # Windows
        if "NUMBER_OF_PROCESSORS" in os.environ:
            n_cpus = int(os.environ["NUMBER_OF_PROCESSORS"])
            if n_cpus > 0:
                return n_cpus
        # Default
        return 1

Source: ME-ICA / me-ica, 26 lines.
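The NameError fallback above only matters on very old Pythons. On a modern interpreter a similar best-effort count can be written with the standard library alone; this sketch is an addition, not part of the quoted project. Note that os.cpu_count() may return None, and os.sched_getaffinity (Unix) reports the CPUs the process is actually allowed to run on, which can be fewer than the machine total:

import os

def available_cpu_count():
    # CPUs this process may actually use (available on Linux/Unix).
    if hasattr(os, "sched_getaffinity"):
        return len(os.sched_getaffinity(0))
    # os.cpu_count() returns None when the count is undetermined.
    return os.cpu_count() or 1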

Example 10: create_parser (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def create_parser():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--delimiter')
    parser.add_argument('--embedding-size', default=200, type=int)
    parser.add_argument('--graph-path')
    parser.add_argument('--has-header', action='store_true')
    parser.add_argument('--input', '-i', dest='infile', required=True)
    parser.add_argument('--log-level', '-l', type=str.upper, default='INFO')
    parser.add_argument('--num-walks', default=1, type=int)
    parser.add_argument('--model', '-m', dest='model_path')
    parser.add_argument('--output', '-o', dest='outfile', required=True)
    parser.add_argument('--stats', action='store_true')
    parser.add_argument('--undirected', action='store_true')
    parser.add_argument('--walk-length', default=10, type=int)
    parser.add_argument('--window-size', default=5, type=int)
    parser.add_argument('--workers', default=multiprocessing.cpu_count(),
                        type=int)
    return parser

Source: jwplayer / jwalk, 22 lines.

Example 11: load_settings (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def load_settings():
    with open('SETTINGS.json') as f:
        settings = json.load(f)
    data_dir = str(settings['competition-data-dir'])
    cache_dir = str(settings['data-cache-dir'])
    submission_dir = str(settings['submission-dir'])
    N_jobs = str(settings['num-jobs'])
    N_jobs = multiprocessing.cpu_count() if N_jobs == 'auto' else int(N_jobs)
    for d in (cache_dir, submission_dir):
        try:
            os.makedirs(d)
        except:
            pass
    return Settings(data_dir=data_dir, cache_dir=cache_dir, submission_dir=submission_dir, N_jobs=N_jobs)

Source: MichaelHills / seizure-prediction, 19 lines.

Example 12: train_reader (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def train_reader(train_list_path):
    def reader():
        with open(train_list_path, 'r') as f:
            lines = f.readlines()
            # Shuffle the data
            np.random.shuffle(lines)
            # Yield each image and its label
            for line in lines:
                data, label = line.split('\t')
                yield data, label
    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)

# Preprocessing for the test data

Source: yeyupiaoling / LearnPaddle2, 18 lines.

Example 13: train_reader (6 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def train_reader(train_list_path, crop_size, resize_size):
    father_path = os.path.dirname(train_list_path)

    def reader():
        with open(train_list_path, 'r') as f:
            lines = f.readlines()
            # Shuffle the image list
            np.random.shuffle(lines)
            # Yield each image and its label
            for line in lines:
                img, label = line.split('\t')
                img = os.path.join(father_path, img)
                yield img, label, crop_size, resize_size
    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 102400)

# Preprocessing for the test images

Source: yeyupiaoling / LearnPaddle2, 20 lines.

Example 14: cpu_count_physical (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def cpu_count_physical():
    """
    tries to get the number of physical (ie not virtual) cores
    """
    try:
        import psutil
        return psutil.cpu_count(logical=False)
    except:
        import multiprocessing
        return multiprocessing.cpu_count()

Source: svviz / svviz, 12 lines.
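One caveat to this example (an observation added here, not part of the quoted project): psutil.cpu_count(logical=False) can itself return None when the physical-core count cannot be determined, and since no exception is raised in that case, the except clause never fires. A slightly stricter sketch that also falls back on None:

def cpu_count_physical_safe():
    # Sketch: fall back when psutil is missing OR returns None.
    try:
        import psutil
        n = psutil.cpu_count(logical=False)
        if n:
            return n
    except ImportError:
        pass
    import multiprocessing
    return multiprocessing.cpu_count()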

Example 15: _n_workers_for_local_cluster (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def _n_workers_for_local_cluster(calcs):
    """The number of workers used in a LocalCluster

    An upper bound is set at the cpu_count or the number of calcs submitted,
    depending on which is smaller. This is to prevent more workers from
    being started than needed (but also to prevent too many workers from
    being started in the case that a large number of calcs are submitted).
    """
    return min(cpu_count(), len(calcs))

Source: spencerahill / aospy, 11 lines.

Example 16: test_n_workers_for_local_cluster (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def test_n_workers_for_local_cluster(calcsuite_init_specs_two_calcs):
    calcs = CalcSuite(calcsuite_init_specs_two_calcs).create_calcs()
    expected = min(cpu_count(), len(calcs))
    result = _n_workers_for_local_cluster(calcs)
    assert result == expected

Source: spencerahill / aospy, 7 lines.

Example 17: cpu_count (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def cpu_count():
    """Return the cpu count."""
    try:
        import multiprocessing
        count = multiprocessing.cpu_count()
    except Exception:
        print("Using fallback CPU count", file=sys.stderr)
        count = 4
    return count

Source: ContinuumIO / ciocheck, 11 lines.

Example 18: parse_args (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def parse_args():
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(
        description='Tool to download dataset images.')
    parser.add_argument('--input_file', required=True,
                        help='Location of dataset.csv')
    parser.add_argument('--output_dir', required=True,
                        help='Output path to download images')
    parser.add_argument('--threads', default=multiprocessing.cpu_count() + 1,
                        help='Number of threads to use')
    args = parser.parse_args()
    return args.input_file, args.output_dir, int(args.threads)

Source: StephanZheng / neural-fingerprinting, 14 lines.

Example 19: __init__ (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def __init__(self, max_processes=mp.cpu_count()):
    """
    Execute several functions (threads, processes) in parallel until return values called.

    @param max_processes: maximum number of tasks that will be run in parallel at the same time
    """
    assert isinstance(max_processes, int)
    # prevent overwrite of previous settings
    if AsyncParallel.pool is not None:
        return
    AsyncParallel.pool = mp.Pool(processes=max_processes)
    AsyncParallel.max_processes = max_processes

Source: CAMI-challenge / CAMISIM, 14 lines.

Example 20: runThreadParallel (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def runThreadParallel(threadTaskList, maxThreads=mp.cpu_count()):
    """
    Execute several functions (threads, processes) in parallel.

    @type threadTaskList: list of TaskThread
    @param maxThreads: maximum number of tasks that will be run in parallel at the same time

    @return: a list of respective return values
    """
    assert isinstance(threadTaskList, list)
    assert isinstance(maxThreads, int)
    # creates a pool of workers, add all tasks to the pool
    pool = mp.Pool(processes=maxThreads)
    taskHandlerList = []
    for task in threadTaskList:
        assert isinstance(task, TaskThread)
        taskHandlerList.append(pool.apply_async(task.fun, task.args))
    # finish all tasks
    pool.close()
    pool.join()
    # retrieve the return values
    retValList = []
    for taskHandler in taskHandlerList:
        taskHandler.wait()
        # assert taskHandler.successful()
        retValList.append(taskHandler.get())
    return retValList

Source: CAMI-challenge / CAMISIM, 32 lines.

Example 21: runCmdParallel (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def runCmdParallel(cmdTaskList, maxProc=mp.cpu_count(), stdInErrLock=mp.Manager().Lock()):
    """
    Run several command line commands in parallel.
    @attention: use the Manager to get the lock as in this function definition !!!

    @param cmdTaskList: list of command line tasks
    @type cmdTaskList: list of TaskCmd
    @param maxProc: maximum number of tasks that will be run in parallel at the same time
    @param stdInErrLock: acquiring the lock enables writing to the stdout and stderr

    @return: list of failed commands, dictionary (cmd, task process)
    """
    assert isinstance(cmdTaskList, list)
    assert isinstance(maxProc, int)
    threadTaskList = []
    for cmdTask in cmdTaskList:
        assert isinstance(cmdTask, TaskCmd)
        threadTaskList.append(TaskThread(_runCmd, (cmdTask, stdInErrLock)))
    returnValueList = runThreadParallel(threadTaskList, maxProc)
    failList = []
    for process, task in returnValueList:
        if process.returncode != 0:
            failList.append(dict(process=process, task=task))
    if len(failList) > 0:
        return failList
    else:
        return None

Source: CAMI-challenge / CAMISIM, 34 lines.

Example 22: __set_runtime_ncores (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def __set_runtime_ncores(self, ncores):
    if ncores is None:
        ncores = INT_TYPE(1)
    else:
        assert is_integer(ncores), LOGGER.error("ncores must be an integer")
        ncores = INT_TYPE(ncores)
        assert ncores > 0, LOGGER.error("ncores must be > 0")
        if ncores > multiprocessing.cpu_count():
            LOGGER.warn("ncores '%s' is reset to %s which is the number of available cores on your machine" % (ncores, multiprocessing.cpu_count()))
            ncores = INT_TYPE(multiprocessing.cpu_count())
    self._runtime_ncores = ncores

Source: bachiraoun / fullrmc, 13 lines.

Example 23: sample_normalize (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def sample_normalize(self, k_samples=1000, overwrite=False):
    """ Estimate the mean and std of the features from the training set
    Params:
        k_samples (int): Use this number of samples for estimation
    """
    log = LogUtil().getlogger()
    log.info("Calculating mean and std from samples")
    # if k_samples is negative then it goes through total dataset
    if k_samples < 0:
        audio_paths = self.audio_paths
    # using sample
    else:
        k_samples = min(k_samples, len(self.train_audio_paths))
        samples = self.rng.sample(self.train_audio_paths, k_samples)
        audio_paths = samples
    manager = Manager()
    return_dict = manager.dict()
    jobs = []
    for threadIndex in range(cpu_count()):
        proc = Process(target=self.preprocess_sample_normalize, args=(threadIndex, audio_paths, overwrite, return_dict))
        jobs.append(proc)
        proc.start()
    for proc in jobs:
        proc.join()

    feat = np.sum(np.vstack([item['feat'] for item in return_dict.values()]), axis=0)
    count = sum([item['count'] for item in return_dict.values()])
    feat_squared = np.sum(np.vstack([item['feat_squared'] for item in return_dict.values()]), axis=0)
    self.feats_mean = feat / float(count)
    self.feats_std = np.sqrt(feat_squared / float(count) - np.square(self.feats_mean))
    np.savetxt(
        generate_file_path(self.save_dir, self.model_name, 'feats_mean'), self.feats_mean)
    np.savetxt(
        generate_file_path(self.save_dir, self.model_name, 'feats_std'), self.feats_std)
    log.info("End calculating mean and std from samples")

Source: awslabs / dynamic-training-with-apache-mxnet-on-aws, 39 lines.

Example 24: scan (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def scan(urls):
    """scan multiple websites with multi processing"""
    vulnerables = []
    results = {}  # store scanned results
    childs = []   # store child processes
    max_processes = multiprocessing.cpu_count() * 2
    pool = multiprocessing.Pool(max_processes, init)

    for url in urls:
        def callback(result, url=url):
            results[url] = result
        childs.append(pool.apply_async(__sqli, (url, ), callback=callback))

    try:
        while True:
            time.sleep(0.5)
            if all([child.ready() for child in childs]):
                break
    except KeyboardInterrupt:
        std.stderr("stopping sqli scanning process")
        pool.terminate()
        pool.join()
    else:
        pool.close()
        pool.join()

    for url, result in results.items():
        if result[0] == True:
            vulnerables.append((url, result[1]))

    return vulnerables

Source: the-robot / sqliv, 35 lines.

Example 25: check (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def check(urls):
    """get many domains' server info with multi processing"""
    domains_info = []  # return in list for termtable input
    results = {}       # store results
    childs = []        # store child processes
    max_processes = multiprocessing.cpu_count() * 2
    pool = multiprocessing.Pool(max_processes, init)

    for url in urls:
        def callback(result, url=url):
            results[url] = result
        childs.append(pool.apply_async(__getserverinfo, (url, ), callback=callback))

    try:
        while True:
            time.sleep(0.5)
            if all([child.ready() for child in childs]):
                break
    except KeyboardInterrupt:
        std.stderr("skipping server info scanning process")
        pool.terminate()
        pool.join()
    else:
        pool.close()
        pool.join()

    # if user skipped the process, some may not have information
    # so put - for empty data
    for url in urls:
        if url in results.keys():
            data = results.get(url)
            domains_info.append([url, data[0], data[1]])
            continue
        domains_info.append([url, '', ''])

    return domains_info

Source: the-robot / sqliv, 41 lines.

Example 26: _get_trial_result_list (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def _get_trial_result_list(self, param_sweep: Iterable[OptimizationParams],
                           identifiers: Optional[Iterable[Hashable]],
                           reevaluate_final_params: bool, save_x_vals: bool,
                           seeds: Optional[Sequence[int]],
                           num_processes: Optional[int]
                           ) -> List[OptimizationTrialResult]:
    if num_processes is None:
        # coverage: ignore
        num_processes = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(num_processes)
    try:
        arg_tuples = ((self.ansatz, self.objective,
                       self._preparation_circuit, self.initial_state,
                       optimization_params, reevaluate_final_params,
                       save_x_vals, seeds[0] if seeds is not None else
                       numpy.random.randint(2**16),
                       self.ansatz.default_initial_params(),
                       self._black_box_type)
                      for optimization_params in param_sweep)
        result_list = pool.map(_run_optimization, arg_tuples)
        trial_results = [
            OptimizationTrialResult([result], optimization_params)
            for optimization_params, result in zip(param_sweep, result_list)
        ]
    finally:
        pool.terminate()
    return trial_results

Source: quantumlib / OpenFermion-Cirq, 31 lines.

Example 27: _get_result_list (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def _get_result_list(self,
                     optimization_params,
                     reevaluate_final_params: bool,
                     save_x_vals: bool,
                     repetitions: int = 1,
                     seeds: Optional[Sequence[int]] = None,
                     use_multiprocessing: bool = False,
                     num_processes: Optional[int] = None
                     ) -> List[OptimizationResult]:
    if use_multiprocessing:
        if num_processes is None:
            num_processes = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(num_processes)
        try:
            arg_tuples = ((self.ansatz, self.objective,
                           self._preparation_circuit, self.initial_state,
                           optimization_params, reevaluate_final_params,
                           save_x_vals, seeds[i] if seeds is not None else
                           numpy.random.randint(2**16),
                           self.ansatz.default_initial_params(),
                           self._black_box_type)
                          for i in range(repetitions))
            result_list = pool.map(_run_optimization, arg_tuples)
        finally:
            pool.terminate()
    else:
        result_list = []
        for i in range(repetitions):
            result = _run_optimization(
                (self.ansatz, self.objective, self._preparation_circuit,
                 self.initial_state, optimization_params,
                 reevaluate_final_params, save_x_vals, seeds[i]
                 if seeds is not None else numpy.random.randint(2**16),
                 self.ansatz.default_initial_params(),
                 self._black_box_type))
            result_list.append(result)
    return result_list

Source: quantumlib / OpenFermion-Cirq, 41 lines.

Example 28: get_cpuusage (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def get_cpuusage(filename, field_values, which_dict):
    cpuusage_file = open(os.path.join(homepath, datadir, filename))
    lines = cpuusage_file.read().split("\n")
    cpu_dict = {}
    if len(lines) == 1:
        return
    cpu_count = multiprocessing.cpu_count()
    for i in range(0, cpu_count):
        cpucore = "cpu" + str(i)
        cpu_dict[cpucore] = {}
    for eachline in lines:
        tokens_split = eachline.split("=")
        if len(tokens_split) == 1:
            continue
        cpucoresplit = tokens_split[0].split("$")
        cpu_dict[cpucoresplit[0]][cpucoresplit[1]] = float(tokens_split[1])
    totalresult = 0
    for i in range(0, cpu_count):
        cpucore = "cpu" + str(i)
        which_dict["cpu_usage"] = cpu_dict
        Total = (cpu_dict[cpucore]["user"] + cpu_dict[cpucore]["nice"] + cpu_dict[cpucore]["system"]
                 + cpu_dict[cpucore]["idle"] + cpu_dict[cpucore]["iowait"] + cpu_dict[cpucore]["irq"]
                 + cpu_dict[cpucore]["softirq"])
        idle = cpu_dict[cpucore]["idle"] + cpu_dict[cpucore]["iowait"]
        field_values[0] = "CPU"
        result = 1 - round(float(idle / Total), 4)
        totalresult += float(result)
    field_values.append(totalresult * 100)

Source: insightfinder / InsightAgent, 28 lines.

Example 29: make_session (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def make_session(num_cpu=None, make_default=False):
    """Returns a session that will use CPU's only"""
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    tf_config.gpu_options.allocator_type = 'BFC'
    tf_config.gpu_options.allow_growth = True
    if make_default:
        return tf.InteractiveSession(config=tf_config)
    else:
        return tf.Session(config=tf_config)

Source: Hwhitetooth / lirpg, 15 lines.

Example 30: _check_njobs (5 votes)

# Required import: import multiprocessing
# Or: from multiprocessing import cpu_count
def _check_njobs(njobs):
    # Check None first: comparing None < 1 raises a TypeError on Python 3.
    if njobs is None:
        return 1
    if njobs < 1:
        njobs = multiprocessing.cpu_count()
    assert isinstance(njobs, int)
    assert njobs >= 1
    return njobs

Source: david-cortes / contextualbandits, 10 lines.

Note: the multiprocessing.cpu_count examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Please do not reproduce without permission.

This article is a repost. If it infringes on any rights, please contact cloudcommunity@tencent.com for removal.
