Refactoring Non-idiomatic Code into Idiomatic Code

Examples:
Pythonic Idiom Non-idiomatic Code
List-Comprehension source code pull request

def input_files_in(self, path):
    input_files = []
    for root, dirs, files in os.walk(path):
        for file in files:
            if file.endswith(".pbxproj"):
                input_files.append(path + file)
    return input_files
List-Comprehension source code pull request

def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = np_utils.to_categorical(y_test)
      y_train = np_utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
    values = []
    with open(fp) as f:
        # On Windows, due to \r\n line ends, we may end up reading empty lines
        # after each line. Skip empty lines.
        for x in csv.reader(f):
            if x:
                values.append(x)
    assert 'nan' in values[-1], 'The last epoch was not logged.'
    
List-Comprehension source code pull request

def _flatten(self, list_of_lists):
    flat_list = []
    for sublist in list_of_lists:
        for item in sublist:
             flat_list.append(item)
    return flat_list
Set-Comprehension source code pull request

def _verify(self):
    """Check that the checkpoint has no duplicates and is consistent."""
    assert self.num_remaining_attacks == len(
        self.worklist
    ), "Recorded number of remaining attacks and size of worklist are different."
    results_set = set()
    for result in self.attack_log_manager.results:
        results_set.add(result.original_text)
    assert (
            len(results_set) == self.results_count
        ), "Duplicate `AttackResults` found."
              
Set-Comprehension source code pull request

def assertPanelActive(self, response, name):
    panels = set()
    for panelset in response.context['panels_by_tab'].values():
        for panel in panelset:
            panels.add(panel.name)
    self.assertIn(name, panels, "Panel %s not active in response" % name)
              
Set-Comprehension source code pull request

def test_queue_enumerate(self):
    f = poutine.trace(poutine.queue(self.model, queue=self.queue))
    trs = []
    while not self.queue.empty():
        trs.append(f.get_trace())
    assert len(trs) == 2 ** 3
    true_latents = set()
    for i1 in range(2):
        for i2 in range(2):
            for i3 in range(2):
                true_latents.add((i1, i2, i3))
    tr_latents = []
    for tr in trs:
        tr_latents.append(
            tuple(
                [
                    int(tr.nodes[name]["value"].view(-1).item())
                    for name in tr
                    if tr.nodes[name]["type"] == "sample"
                    and not tr.nodes[name]["is_observed"]
                ]
            )
        )
    assert true_latents == set(tr_latents)
              
Dict-Comprehension source code pull request

def get_conv_layers(module: Module) -> Dict[str, Module]:
    """
    :param module: the module to grab all conv layers for
    :return: a list of all the conv layers in the module
    """
    convs = {}
    for name, mod in module.named_modules():
        if isinstance(mod, _ConvNd):
            convs[name] = mod
    return  convs
              
Dict-Comprehension source code pull request

@staticmethod
def reverse_post_order_sort_nodes(graph, nodes=None):
    """
    Sort a given set of nodes in reverse post ordering.
    :param networkx.DiGraph graph: A local transition graph of a function.
    :param iterable nodes: A collection of nodes to sort.
    :return: A list of sorted nodes.
    :rtype: list
    """

    post_order = networkx.dfs_postorder_nodes(graph)

    if nodes is None:
        return reversed(list(post_order))
    addrs_to_index = {}
    for i, n in enumerate(post_order):
        addrs_to_index[n.addr] = i
    return sorted(nodes, key=lambda n: addrs_to_index[n.addr], reverse=True)
              
Dict-Comprehension source code pull request

def load_coco_names(file_name):
    names = {}
    with open(file_name) as f:
        for id, name in enumerate(f):
            names[id] = name
    return names
              
Chain-Comparison source code pull request

def get_unlocked_luks_containers_uuids():
    """
    Returns a list of LUKS container uuids backing open LUKS volumes.
    The method used is to first run:
    'dmsetup info --columns --noheadings -o name --target crypt' eg output:
    luks-82fd9db1-e1c1-488d-9b42-536d0a82caeb
    luks-3efb3830-fee1-4a9e-a5c6-ea456bfc269e
    luks-a47f4950-3296-4504-b9a4-2dc75681a6ad
    to get a list of open LUKS containers (--target crypt). If the usual naming
    convention is followed we have a name format of luks- with len = 41
    and we can extract the uuid of the LUKS container from it syntactically.
    If this naming convention is not matched then we fail over to calling:
    get_open_luks_container_dev() and then looking up that devices uuid via
    our uuid_name_map dictionary.
    :return: list containing the uuids of LUKS containers that have currently
    open volumes, or empty list if none open or an error occurred.
    """
    open_luks_container_uuids = []
    # flag to minimise calls to get_uuid_name_map()
    uuid_name_map_retrieved = False
    uuid_name_map = {}
    out, err, rc = run_command(
        [
            DMSETUP,
            "info",
            "--columns",
            "--noheadings",
            "--options",
            "name",
            "--target",
            "crypt",
        ]
    )
    if len(out) > 0 and rc == 0:
        # The output has at least one line and our dmsetup executed OK.
        for each_line in out:
            if each_line == "":
                continue
            backing_container_uuid = None
            if len(each_line) == 41 and re.match("luks-", each_line):
                # good chance on "luks-a47f4950-3296-4504-b9a4-2dc75681a6ad"
                # naming convention so strip uuid from this (cheap and quick)
                backing_container_uuid = each_line[5:]
            else:
                # More expensive two step process to retrieve uuid of LUKS
                # container backing this open LUKS volume.
                # Initial call to gain backing device name for our container
                container_dev = get_open_luks_container_dev(each_line)
                # strip leading /dev/ from device name if any returned.
                if container_dev is not "":
                    container_dev = container_dev.split("/")[-1]
                    # should now have name without path ie 'vdd' ready to
                    # index our uuid_name_map.
                    if not uuid_name_map_retrieved:
                        uuid_name_map = get_uuid_name_map()
                        uuid_name_map_retrieved = True
                    # second stage where we look up this devices uuid
                    backing_container_uuid = uuid_name_map[container_dev]
            # if a backing container uuid was found add it to our list
            if backing_container_uuid is not None:
                open_luks_container_uuids.append(backing_container_uuid)
    return open_luks_container_uuids
              
Chain-Comparison source code pull request

def _classification_function_3(
        salary, commission, age, elevel, car, zipcode, hvalue, hyears, loan
    ):
    if age < 40:
        if (elevel == 0) or (elevel == 1):
            return int((25000 <= salary) and (salary <= 75000))
        else:
            return int((50000 <= salary) and (salary <= 100000))
    elif age < 60:
        if (elevel == 1) or (elevel == 2) or (elevel == 3):
            return int((50000 <= salary) and (salary <= 100000))
        else:
            return int((75000 <= salary) and (salary <= 125000))
    else:
        if (elevel == 2) or (elevel == 3) or (elevel == 4):
            return int((50000 <= salary) and (salary <= 100000))
        else:
            return int((25000 <= salary) and (salary <= 75000))
              
Chain-Comparison source code pull request

def f_test(Y, f0, Y_em, R, alpha, m1, m2, m3, n1, n2, n3, n):
    """Use F-test for model selection."""
    # Initialize ind with zeros (all terms insignificant)
    select = np.zeros((n, 1))
    # Determine the significant components of the HDMR model via the F-test
    Y_res0 = Y - f0
    SSR0 = np.sum(np.square(Y_res0))
    p0 = 0
    for i in range(n):
        # model with ith term included
        Y_res1 = Y_res0 - Y_em[:, i].reshape(R, 1)
        # Number of parameters of proposed model (order dependent)
        if i <= n1:
            p1 = m1        # 1st order
        elif i > n1 and i <= (n1 + n2):
            p1 = m2        # 2nd order
        else:
            p1 = m3        # 3rd order
        # Calculate SSR of Y1
        SSR1 = np.sum(np.square(Y_res1))
        # Now calculate the F_stat (F_stat > 0 -> SSR1 < SSR0 )
        F_stat = ((SSR0 - SSR1) / (p1 - p0)) / (SSR1 / (R - p1))
        # Now calculate critical F value
        F_crit = stats.f.ppf(q=alpha, dfn=p1 - p0, dfd=R - p1)
        # Now determine whether to accept ith component into model
        if F_stat > F_crit:
            # ith term is significant and should be included in model
            select[i] = 1
    return select.reshape(n,)
              
Truth-Value-Test source code pull request

@pytest.mark.parametrize("use_combined_loader", [False, True])
def test_prefetch_iterator(use_combined_loader):
    """Test the DataFetcher with PyTorch IterableDataset."""
    class IterDataset(IterableDataset):
        def __iter__(self):
            yield 1
            yield 2
            yield 3
    for prefetch_batches in range(1, 5):
        if use_combined_loader:
            loader = CombinedLoader([DataLoader(IterDataset()), DataLoader(IterDataset())])
            expected = [
                ([tensor([1]), tensor([1])], False),
                ([tensor([2]), tensor([2])], False),
                ([tensor([3]), tensor([3])], True),
            ]
        else:
            loader = DataLoader(IterDataset())
            expected = [(1, False), (2, False), (3, True)]
        iterator = DataFetcher(prefetch_batches=prefetch_batches)
        assert iterator.prefetch_batches == prefetch_batches
        iterator.setup(loader)
        def generate():
            generated = []
            for idx, data in enumerate(iterator, 1):
                if iterator.done:
                    assert iterator.fetched == 3
                else:
                    assert iterator.fetched == (idx + prefetch_batches)
                generated.append(data)
            return generated
        assert generate() == expected
        # validate reset works properly.
        assert generate() == expected
        assert iterator.fetched == 3
    class EmptyIterDataset(IterableDataset):
        def __iter__(self):
            return iter([])
    dataloader = DataLoader(EmptyIterDataset())
    iterator = DataFetcher()
    iterator.setup(dataloader)
    assert list(iterator) == []
              
Truth-Value-Test source code pull request

def __init__(self, config):
    super().__init__()
    if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
        raise ValueError(
            f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
            f"heads ({config.num_attention_heads})"
        )
              
Truth-Value-Test source code pull request

def getMessages():
    global receiveTime, receiveRowid
    sql = '''SELECT rowid,text,date FROM message where text like \'%京东%验证码%\'  order by date desc limit 1'''
    result = curs.execute(sql)
    if len(list(result)) > 0:
        for i in curs.execute(sql):
            getTowid = i[0]
            getText = i[1]
            getTime = i[2]
            if getTowid > receiveRowid and getTime > receiveTime:
                receiveRowid = getTowid
                receiveTime = getTime
                code = getCode(getText)
                printT(f"收到最新验证码: {code}")
                if code != 0:
                    postCode(code)
              
Loop-Else source code pull request

def enum_small(self, multiplicities, ub):
    self.discarded = 0
    if ub <= 0:
        return
    self._initialize_enumeration(multiplicities)
    good_partition = True
    while self.spread_part_multiplicity():
        self.db_trace("spread 1")
        if self.lpart >= ub:
            self.discarded += 1
            good_partition = False
            self.db_trace(" Discarding")
            self.lpart = ub - 2
            break
    if good_partition:
        state = [self.f, self.lpart, self.pstack]
        yield state
    # M5 (Decrease v)
    while not self.decrement_part_small(self.top_part(), ub):
        self.db_trace("Failed decrement, going to backtrack")
        # M6 (Backtrack)
        if self.lpart == 0:
            return
        self.lpart -= 1
        self.db_trace("Backtracked to")
    self.db_trace("decrement ok, about to expand")
              
Loop-Else source code pull request

def get_project_file_name(window):
    """Getting project file name for ST2."""
    if not window.folders():
        return None
    projects = _get_projects_from_session()
    for project_file in projects:
        project_file = re.sub(r'^/([^/])/', '\\1:/', project_file)
        project_json = json.loads(file(project_file, 'r').read(), strict=False)
        if 'folders' in project_json:
            folders = project_json['folders']
            found_all = True
            for directory in window.folders():
                found = False
                for folder in folders:
                    folder_path = re.sub(r'^/([^/])/', '\\1:/', folder['path'])
                    if folder_path == directory.replace('\\', '/'):
                        found = True
                        break
                if not found:
                    found_all = False
                    break

        if found_all:
            return project_file
    return None
              
Loop-Else source code pull request


def __impl__(self, other_var):
    lhs_dtype = safe_get_dtype(self)

    if not isinstance(other_var, Variable):
        if reverse:
            has_batch_size = False
            for elem in self.shape:
                if elem < 0:
                    has_batch_size = True
                    break
            if not has_batch_size:
                other_var = create_tensor(
                    self.block,
                    other_var,
                    dtype=lhs_dtype,
                    shape=self.shape)
            else:
                other_var = create_tensor_with_batchsize(
                    self, other_var, lhs_dtype)
        else:
            # add fill_op to self.block
            other_var = create_scalar(
                self.block, value=other_var, dtype=lhs_dtype)
    rhs_dtype = safe_get_dtype(other_var)
    if lhs_dtype != rhs_dtype:
        other_var = astype(other_var, lhs_dtype)
    if reverse:
        tmp = self
        self = other_var
        other_var = tmp
    tmp_name = unique_tmp_name()
    out = self.block.create_var(name=tmp_name, dtype=lhs_dtype)
    axis = -1
    if other_var.shape[0] == -1:
        axis = 0
    assert len(self.shape) >= len(other_var.shape), (
        "The rank of the first argument of an binary operator cannot "
        "be smaller than the rank of its second argument: %s vs %s" %
        (len(self.shape), len(other_var.shape)))
    self.block.append_op(
        type=op_type,
        inputs={'X': [self],
                'Y': [other_var]},
        outputs={'Out': out},
        attrs={'axis': axis})
    return out
              
Assign-Multi-Targets source code pull request

def failure_handle(res, row_id):
    mogrified_sql = conn.mogrify(item['sql'], item['data'])
    mogrified_sql = mogrified_sql if mogrified_sql is not None \
        else item['sql']
    query_results.append({
        'status': False,
        'result': res,
        'sql': mogrified_sql,
        'rows_affected': 0,
        'row_added': None
    })

    if is_savepoint:
        sql = 'ROLLBACK TO SAVEPOINT save_data;'
        msg = 'A ROLLBACK was done for the save operation only. ' \
              'The active transaction is not affected.'
    else:
        sql = 'ROLLBACK;'
        msg = 'A ROLLBACK was done for the save transaction.'

    rollback_status, rollback_result = \
        execute_void_wrapper(conn, sql, query_results)
    if not rollback_status:
        return rollback_status, rollback_result, query_results, None

    # If we roll backed every thing then update the
    # message for each sql query.
    for query in query_results:
        if query['status']:
            query['result'] = msg

    return False, res, query_results, row_id
              
Assign-Multi-Targets source code pull request

def throttling_swap(d: list, e: int):
    """Swap positions of the 0'th and e'th elements in-place."""
    e = throttling_mod_func(d, e)
    f = d[0]
    d[0] = d[e]
    d[e] = f
              
Assign-Multi-Targets source code pull request

def FFT_bitreverse(N, data):
    n = N // 2
    nm1 = n - 1
    j = 0
    for i in range(nm1):
        ii = i << 1
        jj = j << 1
        k = n >> 1
        if i < j:
            tmp_real = data[ii]
            tmp_imag = data[ii + 1]
            data[ii] = data[jj]
            data[ii + 1] = data[jj + 1]
            data[jj] = tmp_real
            data[jj + 1] = tmp_imag
        while k <= j:
            j -= k
            k >>= 1
        j += k
              
Star-in-Func-Call source code pull request

def sha_transform(sha_info):
    W = []

    d = sha_info["data"]
    for i in range(0, 16):
        W.append(
            (d[8 * i] << 56)
            + (d[8 * i + 1] << 48)
            + (d[8 * i + 2] << 40)
            + (d[8 * i + 3] << 32)
            + (d[8 * i + 4] << 24)
            + (d[8 * i + 5] << 16)
            + (d[8 * i + 6] << 8)
            + d[8 * i + 7]
        )

    for i in range(16, 80):
        W.append(
            (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xFFFFFFFFFFFFFFFF
        )

    ss = sha_info["digest"][:]

    def RND(a, b, c, d, e, f, g, h, i, ki):
        t0 = (h + Sigma1(e) + Ch(e, f, g) + ki + W[i]) & 0xFFFFFFFFFFFFFFFF
        t1 = (Sigma0(a) + Maj(a, b, c)) & 0xFFFFFFFFFFFFFFFF
        d = (d + t0) & 0xFFFFFFFFFFFFFFFF
        h = (t0 + t1) & 0xFFFFFFFFFFFFFFFF
        return d & 0xFFFFFFFFFFFFFFFF, h & 0xFFFFFFFFFFFFFFFF

    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 0, 0x428A2F98D728AE22
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 1, 0x7137449123EF65CD
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 2, 0xB5C0FBCFEC4D3B2F
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 3, 0xE9B5DBA58189DBBC
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 4, 0x3956C25BF348B538
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 5, 0x59F111F1B605D019
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 6, 0x923F82A4AF194F9B
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 7, 0xAB1C5ED5DA6D8118
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 8, 0xD807AA98A3030242
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 9, 0x12835B0145706FBE
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 10, 0x243185BE4EE4B28C
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 11, 0x550C7DC3D5FFB4E2
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 12, 0x72BE5D74F27B896F
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 13, 0x80DEB1FE3B1696B1
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 14, 0x9BDC06A725C71235
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 15, 0xC19BF174CF692694
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 16, 0xE49B69C19EF14AD2
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 17, 0xEFBE4786384F25E3
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 18, 0x0FC19DC68B8CD5B5
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 19, 0x240CA1CC77AC9C65
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 20, 0x2DE92C6F592B0275
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 21, 0x4A7484AA6EA6E483
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 22, 0x5CB0A9DCBD41FBD4
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 23, 0x76F988DA831153B5
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 24, 0x983E5152EE66DFAB
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 25, 0xA831C66D2DB43210
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 26, 0xB00327C898FB213F
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 27, 0xBF597FC7BEEF0EE4
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 28, 0xC6E00BF33DA88FC2
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 29, 0xD5A79147930AA725
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 30, 0x06CA6351E003826F
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 31, 0x142929670A0E6E70
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 32, 0x27B70A8546D22FFC
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 33, 0x2E1B21385C26C926
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 34, 0x4D2C6DFC5AC42AED
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 35, 0x53380D139D95B3DF
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 36, 0x650A73548BAF63DE
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 37, 0x766A0ABB3C77B2A8
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 38, 0x81C2C92E47EDAEE6
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 39, 0x92722C851482353B
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 40, 0xA2BFE8A14CF10364
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 41, 0xA81A664BBC423001
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 42, 0xC24B8B70D0F89791
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 43, 0xC76C51A30654BE30
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 44, 0xD192E819D6EF5218
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 45, 0xD69906245565A910
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 46, 0xF40E35855771202A
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 47, 0x106AA07032BBD1B8
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 48, 0x19A4C116B8D2D0C8
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 49, 0x1E376C085141AB53
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 50, 0x2748774CDF8EEB99
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 51, 0x34B0BCB5E19B48A8
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 52, 0x391C0CB3C5C95A63
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 53, 0x4ED8AA4AE3418ACB
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 54, 0x5B9CCA4F7763E373
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 55, 0x682E6FF3D6B2B8A3
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 56, 0x748F82EE5DEFB2FC
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 57, 0x78A5636F43172F60
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 58, 0x84C87814A1F0AB72
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 59, 0x8CC702081A6439EC
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 60, 0x90BEFFFA23631E28
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 61, 0xA4506CEBDE82BDE9
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 62, 0xBEF9A3F7B2C67915
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 63, 0xC67178F2E372532B
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 64, 0xCA273ECEEA26619C
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 65, 0xD186B8C721C0C207
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 66, 0xEADA7DD6CDE0EB1E
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 67, 0xF57D4F7FEE6ED178
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 68, 0x06F067AA72176FBA
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 69, 0x0A637DC5A2C898A6
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 70, 0x113F9804BEF90DAE
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 71, 0x1B710B35131C471B
    )
    ss[3], ss[7] = RND(
        ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], 72, 0x28DB77F523047D84
    )
    ss[2], ss[6] = RND(
        ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], 73, 0x32CAAB7B40C72493
    )
    ss[1], ss[5] = RND(
        ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], ss[5], 74, 0x3C9EBE0A15C9BEBC
    )
    ss[0], ss[4] = RND(
        ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], ss[4], 75, 0x431D67C49C100D4C
    )
    ss[7], ss[3] = RND(
        ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], ss[3], 76, 0x4CC5D4BECB3E42B6
    )
    ss[6], ss[2] = RND(
        ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], ss[2], 77, 0x597F299CFC657E2A
    )
    ss[5], ss[1] = RND(
        ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], ss[1], 78, 0x5FCB6FAB3AD6FAEC
    )
    ss[4], ss[0] = RND(
        ss[1], ss[2], ss[3], ss[4], ss[5], ss[6], ss[7], ss[0], 79, 0x6C44198C4A475817
    )

    dig = []
    for i, x in enumerate(sha_info["digest"]):
        dig.append((x + ss[i]) & 0xFFFFFFFFFFFFFFFF)
    sha_info["digest"] = dig
              
Star-in-Func-Call source code pull request

def check_charbb(self, charbb):
    xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
    xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
    ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
    ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
    return (
        xmaxs - xmins > self.min_proposal_size
        and ymaxs - ymins > self.min_proposal_size
    )
              
Star-in-Func-Call source code pull request

def p_varargslist_v10(self, p):
    """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt COMMA POW vfpdef"""
    # x, *args, **kwargs
    p0 = ast.arguments(
        posonlyargs=[],
        args=[],
        vararg=None,
        kwonlyargs=[],
        kw_defaults=[],
        kwarg=p[9],
        defaults=[],
    )
    self._set_regular_args(p0, p[1], p[2], p[3], p[4])
    self._set_var_args(p0, p[6], None)
    p[0] = p0
              
For-Multi-Targets source code pull request

def run(self, edit):
    # somewhat custom blame command:
    # -w: ignore whitespace changes
    # -M: retain blame when moving lines
    # -C: retain blame when copying lines between files
    command = ['git', 'blame', '-w', '-M', '-C']
    line_ranges = [self.get_lines(selection) for selection in self.view.sel() if not selection.empty()]

    if line_ranges:
         for line_range in line_ranges:
             command.extend(('-L', str(line_range[0]) + ',' + str(line_range[1])))
         callback = self.blame_done
    else:
         callback = functools.partial(self.blame_done,
                                     focused_line=self.get_current_line())
    command.append(self.get_file_name())
    self.run_command(command, callback)
              
For-Multi-Targets source code pull request

def _al_create_and_start_processes(self, tasks_data):
    #0. Create output queues
    for data in tasks_data:
        task_id = data[1]
        queue = Queue(1)
        self._task_output_queues[task_id] = queue

    self._termination_event = Event()

    #1. Initialize tasks
    tasks = []
    for data in tasks_data:
        node = data[0]
        node_id = data[1]
        parent_node_id = data[2]
        is_last = data[3]
        #1.1 Creating messenger for task
        task_queue = self._task_output_queues.get(node_id)
        if parent_node_id is not None:
            parent_task_queue = self._task_output_queues.get(parent_node_id)
        else:
            parent_task_queue = None

        messenger = BatchprocessingQueueMessenger(node, task_queue, parent_task_queue, self._termination_event)

        if isinstance(node, ProducerNode):
            task = ProducerTask(node, messenger, node_id, is_last)
            tasks.append(task)

        elif isinstance(node, ProcessorNode):
            if node.nb_tasks > 1:
                receiveQueue = Queue(1)
                accountingQueue = Queue()
                output_queues = [Queue() for _ in range(node.nb_tasks)]

                # Create receive task
                receive_task = MultiprocessingReceiveTask(
                    node,
                    parent_task_queue,
                    receiveQueue,
                    BATCH
                )
                tasks.append(receive_task)

                # Create processor tasks
                mp_tasks_lock = Lock()
                for idx in range(node.nb_tasks):
                    mp_task = MultiprocessingProcessorTask(
                        idx,
                        node,
                        mp_tasks_lock,
                        receiveQueue,
                        accountingQueue,
                        output_queues[idx]
                    )
                    tasks.append(mp_task)

                # Create output task
                output_task = MultiprocessingOutputTask(
                    node,
                    task_queue,
                    accountingQueue,
                    output_queues,
                    BATCH,
                    is_last
                )
                tasks.append(output_task)
            else:
                task = ProcessorTask(
                    node,
                    messenger,
                    node_id,
                    is_last,
                    parent_node_id
                )
                tasks.append(task)

        elif isinstance(node, ConsumerNode):
            task = ConsumerTask(
                node,
                messenger,
                node_id,
                is_last,
                parent_node_id
            )
            tasks.append(task)

    #2. Create processes
    for task in tasks:
        if isinstance(task, ProcessorTask) or isinstance(task, MultiprocessingProcessorTask):
            if task.device_type == GPU:
                self._next_gpu_index += 1
                if self._next_gpu_index < self._nb_available_gpus:
                    proc = create_process_task_gpu(task, self._gpu_ids[self._next_gpu_index])
                else:
                    try:
                        task.change_device(CPU)
                        proc = create_process_task(task)
                    except:
                        raise RuntimeError('No GPU available to allocate {}'.format(str(task._processor)))
            else:
                proc = create_process_task(task)
        else:
            proc = create_process_task(task)
        self._procs.append(proc)

    #3. Start processes.
    for proc in self._procs:
        proc.start()

              
For-Multi-Targets source code pull request

def plot_pose(pose):
    """Plot the 3D pose showing the joint connections."""
    import mpl_toolkits.mplot3d.axes3d as p3

    _CONNECTION = [
        [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],
        [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],
        [15, 16]]
    assert (pose.ndim == 2)
    assert (pose.shape[0] == 3)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    for c in _CONNECTION:
        col = '#%02x%02x%02x' % joint_color(c[0])
        ax.plot([pose[0, c[0]], pose[0, c[1]]],
                [pose[1, c[0]], pose[1, c[1]]],
                [pose[2, c[0]], pose[2, c[1]]], c=col)
    for j in range(pose.shape[1]):
        col = '#%02x%02x%02x' % joint_color(j)
        ax.scatter(pose[0, j], pose[1, j], pose[2, j],
                   c=col, marker='o', edgecolor=col)
    smallest = pose.min()
    largest = pose.max()
    ax.set_xlim3d(smallest, largest)
    ax.set_ylim3d(smallest, largest)
    ax.set_zlim3d(smallest, largest)

    return fig