Python Source Code Examples: pip._internal.models.link.Link()

Example 1
def _get_pages(self, locations, project_name):
        # type: (Iterable[Link], str) -> Iterable[HTMLPage]
        """
        Yields (page, page_url) from the given locations, skipping
        locations that have errors.
        """
        seen = set()  # type: Set[Link]
        for location in locations:
            if location in seen:
                continue
            seen.add(location)

            page = _get_html_page(location, session=self.session)
            if page is None:
                continue

            yield page 
Example 2
def iter_links(self):
        # type: () -> Iterable[Link]
        """Yields all links in the page"""
        document = html5lib.parse(
            self.content,
            transport_encoding=_get_encoding_from_headers(self.headers),
            namespaceHTMLElements=False,
        )
        base_url = _determine_base_url(document, self.url)
        for anchor in document.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = _clean_link(urllib_parse.urljoin(base_url, href))
                pyrequire = anchor.get('data-requires-python')
                pyrequire = unescape(pyrequire) if pyrequire else None
                yield Link(url, self.url, requires_python=pyrequire) 
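
The extraction pattern above is easy to try standalone. A minimal sketch, assuming html5lib is installed; the page URL and HTML snippet below are made up for illustration:

import html5lib
from urllib.parse import urljoin

page_url = "https://example.com/simple/demo/"  # hypothetical index page
content = b'<a href="demo-1.0.tar.gz">demo</a>'  # hypothetical page body

# With namespaceHTMLElements=False, html5lib.parse returns an
# ElementTree element, so .findall works just as in iter_links.
document = html5lib.parse(content, namespaceHTMLElements=False)
for anchor in document.findall(".//a"):
    href = anchor.get("href")
    if href:
        # Resolve relative hrefs against the page URL.
        print(urljoin(page_url, href))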
Example 3
def _check_download_dir(link, download_dir, hashes):
    # type: (Link, str, Hashes) -> Optional[str]
    """ Check download_dir for previously downloaded file with correct hash
        If a correct file is found return its path else None
    """
    download_path = os.path.join(download_dir, link.filename)
    if os.path.exists(download_path):
        # If already downloaded, does its hash match?
        logger.info('File was already downloaded %s', download_path)
        if hashes:
            try:
                hashes.check_against_path(download_path)
            except HashMismatch:
                logger.warning(
                    'Previously-downloaded file %s has bad hash. '
                    'Re-downloading.',
                    download_path
                )
                os.unlink(download_path)
                return None
        return download_path
    return None 
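
pip's Hashes.check_against_path performs the digest comparison used above; a rough standalone equivalent with hashlib (the helper name and arguments are hypothetical) might look like:

import hashlib

def file_hash_matches(path, expected_sha256):
    # Stream the file in chunks so large downloads are not read into
    # memory at once, then compare hex digests.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256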
Example 4
def _get_cache_path_parts(self, link):
        # type: (Link) -> List[str]
        """Get parts of part that must be os.path.joined with cache_dir
        """

        # We want to generate a URL to use as our cache key; we don't want
        # to just re-use the URL because it might have other items in the
        # fragment that we don't care about.
        key_parts = [link.url_without_fragment]
        if link.hash_name is not None and link.hash is not None:
            key_parts.append("=".join([link.hash_name, link.hash]))
        key_url = "#".join(key_parts)

        # Hash our key URL with sha224: it offers security properties
        # similar to sha256 but a shorter output, and the difference
        # does not matter for a cache key.
        hashed = hashlib.sha224(key_url.encode()).hexdigest()

        # We want to nest the directories some to prevent having a ton of top
        # level directories where we might run out of sub directories on some
        # FS.
        parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

        return parts 
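
The resulting directory layout is easy to see in isolation. A minimal sketch of the same scheme, with a made-up link URL:

import hashlib

key_url = "https://files.example.com/demo-1.0.tar.gz"  # hypothetical
hashed = hashlib.sha224(key_url.encode()).hexdigest()

# Three two-character directory levels, then the remainder as the leaf,
# exactly as _get_cache_path_parts slices the digest.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
print("/".join(parts))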
Example 5
def _get_candidates(self, link, package_name):
        # type: (Link, Optional[str]) -> List[Any]
        can_not_cache = (
            not self.cache_dir or
            not package_name or
            not link
        )
        if can_not_cache:
            return []

        canonical_name = canonicalize_name(package_name)
        formats = self.format_control.get_allowed_formats(
            canonical_name
        )
        if not self.allowed_formats.intersection(formats):
            return []

        root = self.get_path_for_link(link)
        try:
            return os.listdir(root)
        except OSError as err:
            if err.errno in {errno.ENOENT, errno.ENOTDIR}:
                return []
            raise 
Example 6
def get_path_for_link(self, link):
        # type: (Link) -> str
        """Return a directory to store cached wheels for link

        Because there are M wheels for any one sdist, we provide a directory
        to cache them in, and then consult that directory when looking up
        cache hits.

        We only insert things into the cache if they have plausible version
        numbers, so that we don't contaminate the cache with things that were
        not unique. E.g. ./package might have dozens of installs done for it
        and build a version of 0.0...and if we built and cached a wheel, we'd
        end up using the same wheel even if the source has been edited.

        :param link: The link of the sdist for which this will cache wheels.
        """
        parts = self._get_cache_path_parts(link)

        # Store wheels within the root cache_dir
        return os.path.join(self.cache_dir, "wheels", *parts) 
Example 7
def get(self, link, package_name):
        # type: (Link, Optional[str]) -> Link
        candidates = []

        for wheel_name in self._get_candidates(link, package_name):
            try:
                wheel = Wheel(wheel_name)
            except InvalidWheelFilename:
                continue
            if not wheel.supported():
                # Built for a different python/arch/etc
                continue
            candidates.append((wheel.support_index_min(), wheel_name))

        if not candidates:
            return link

        return self._link_for_candidate(link, min(candidates)[1]) 
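
The min(candidates) call relies on Python's element-wise tuple comparison: the pair with the lowest support index, i.e. the most specific supported wheel, wins. A small illustration with made-up values:

# Hypothetical (support_index, wheel_name) pairs; a lower index means a
# more specific supported tag.
candidates = [
    (3, "demo-1.0-py3-none-any.whl"),
    (0, "demo-1.0-cp38-cp38-manylinux1_x86_64.whl"),
]
print(min(candidates)[1])  # the platform-specific wheel is chosen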
Example 8
def _get_http_response_filename(resp, link):
    # type: (Response, Link) -> str
    """Get an ideal filename from the given HTTP response, falling back to
    the link filename if not provided.
    """
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        filename = parse_content_disposition(content_disposition, filename)
    ext = splitext(filename)[1]  # type: Optional[str]
    if not ext:
        ext = mimetypes.guess_extension(
            resp.headers.get('content-type', '')
        )
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    return filename 
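
mimetypes.guess_extension comes from the standard library and returns a dotted extension, or None for an unknown content type; that None is what triggers the URL-based fallback above:

import mimetypes

# Known type: yields a dotted extension (the exact value can vary by
# platform registry).
print(mimetypes.guess_extension("application/zip"))
# Unknown type: yields None, so the function falls back to resp.url.
print(mimetypes.guess_extension("application/x-no-such-type"))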
Example 9
def _download_http_url(
    link,  # type: Link
    downloader,  # type: Downloader
    temp_dir,  # type: str
    hashes,  # type: Optional[Hashes]
):
    # type: (...) -> Tuple[str, str]
    """Download link url into temp_dir using provided session"""
    download = downloader(link)

    file_path = os.path.join(temp_dir, download.filename)
    with open(file_path, 'wb') as content_file:
        for chunk in download.chunks:
            content_file.write(chunk)

    if hashes:
        hashes.check_against_path(file_path)

    return file_path, download.response.headers.get('content-type', '') 
Example 10
def _check_download_dir(link, download_dir, hashes):
    # type: (Link, str, Optional[Hashes]) -> Optional[str]
    """ Check download_dir for previously downloaded file with correct hash
        If a correct file is found return its path else None
    """
    download_path = os.path.join(download_dir, link.filename)

    if not os.path.exists(download_path):
        return None

    # If already downloaded, does its hash match?
    logger.info('File was already downloaded %s', download_path)
    if hashes:
        try:
            hashes.check_against_path(download_path)
        except HashMismatch:
            logger.warning(
                'Previously-downloaded file %s has bad hash. '
                'Re-downloading.',
                download_path
            )
            os.unlink(download_path)
            return None
    return download_path 
Example 11
def parse_req_from_editable(editable_req):
    # type: (str) -> RequirementParts
    name, url, extras_override = parse_editable(editable_req)

    if name is not None:
        try:
            req = Requirement(name)
        except InvalidRequirement:
            raise InstallationError("Invalid requirement: '%s'" % name)
    else:
        req = None

    link = Link(url)

    return RequirementParts(req, link, None, extras_override)


Example 12
def _sort_links(self, links):
        # type: (Iterable[Link]) -> List[Link]
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen = set()  # type: Set[Link]
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs 
Example 13
def get_install_candidate(self, link_evaluator, link):
        # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]
        """
        If the link is a candidate for install, convert it to an
        InstallationCandidate and return it. Otherwise, return None.
        """
        is_candidate, result = link_evaluator.evaluate_link(link)
        if not is_candidate:
            if result:
                self._log_skipped_link(link, reason=result)
            return None

        return InstallationCandidate(
            name=link_evaluator.project_name,
            link=link,
            # Convert the Text result to str since InstallationCandidate
            # accepts str.
            version=str(result),
        ) 
Example 14
def process_project_url(self, project_url, link_evaluator):
        # type: (Link, LinkEvaluator) -> List[InstallationCandidate]
        logger.debug(
            'Fetching project page and analyzing links: %s', project_url,
        )
        html_page = self._link_collector.fetch_page(project_url)
        if html_page is None:
            return []

        page_links = list(parse_links(html_page))

        with indent_log():
            package_links = self.evaluate_links(
                link_evaluator,
                links=page_links,
            )

        return package_links 
Example 15
def parse_links(page):
    # type: (HTMLPage) -> Iterable[Link]
    """
    Parse an HTML document, and yield its anchor elements as Link objects.
    """
    document = html5lib.parse(
        page.content,
        transport_encoding=page.encoding,
        namespaceHTMLElements=False,
    )

    url = page.url
    base_url = _determine_base_url(document, url)
    for anchor in document.findall(".//a"):
        link = _create_link_from_element(
            anchor,
            page_url=url,
            base_url=base_url,
        )
        if link is None:
            continue
        yield link 
Example 16
def __init__(
        self,
        files,         # type: List[Link]
        find_links,    # type: List[Link]
        project_urls,  # type: List[Link]
    ):
        # type: (...) -> None
        """
        :param files: Links from file locations.
        :param find_links: Links from find_links.
        :param project_urls: URLs to HTML project pages, as described by
            the PEP 503 simple repository API.
        """
        self.files = files
        self.find_links = find_links
        self.project_urls = project_urls 
Example 17
def _is_url_like_archive(url):
    # type: (str) -> bool
    """Return whether the URL looks like an archive.
    """
    filename = Link(url).filename
    for bad_ext in ARCHIVE_EXTENSIONS:
        if filename.endswith(bad_ext):
            return True
    return False 
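
Since str.endswith also accepts a tuple of suffixes, the loop can be collapsed; a minimal sketch, with an assumed subset of ARCHIVE_EXTENSIONS:

ARCHIVE_EXTENSIONS = (".tar.gz", ".zip", ".whl")  # assumed subset

def is_filename_like_archive(filename):
    # str.endswith accepts a tuple, replacing the explicit loop.
    return filename.endswith(ARCHIVE_EXTENSIONS)

print(is_filename_like_archive("demo-1.0.tar.gz"))  # True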
Example 18
def _handle_get_page_fail(
    link,  # type: Link
    reason,  # type: Union[str, Exception]
    meth=None  # type: Optional[Callable[..., None]]
):
    # type: (...) -> None
    if meth is None:
        meth = logger.debug
    meth("Could not fetch URL %s: %s - skipping", link, reason) 
Example 19
def _sort_key(self, candidate):
        # type: (InstallationCandidate) -> CandidateSortingKey
        """
        Function used to generate link sort key for link tuples.
        The greater the return value, the more preferred it is.
        If not finding wheels, then sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._valid_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.
        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version would have to be considered equal
        """
        support_num = len(self._valid_tags)
        build_tag = tuple()  # type: BuildTag
        binary_preference = 0
        if candidate.location.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(candidate.location.filename)
            if not wheel.supported(self._valid_tags):
                raise UnsupportedWheel(
                    "%s is not a supported wheel for this platform. It "
                    "can't be sorted." % wheel.filename
                )
            if self._prefer_binary:
                binary_preference = 1
            pri = -(wheel.support_index_min(self._valid_tags))
            if wheel.build_tag is not None:
                match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            pri = -(support_num)
        return (binary_preference, candidate.version, build_tag, pri) 
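
Because the returned tuples are compared element-wise, binary_preference dominates, then version, then build_tag, then the negated support index. A hedged illustration with simplified stand-in values (real candidates carry Version objects, not plain tuples):

# Made-up (binary_preference, version, build_tag, pri) keys; the best
# candidate is simply the largest tuple.
keys = [
    (0, (1, 0), (), -5),  # sdist of version 1.0
    (0, (1, 0), (), -2),  # wheel of 1.0 with a better support index
    (0, (1, 1), (), -5),  # sdist of the newer version 1.1
]
print(max(keys))  # the newer version wins before wheel preference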
Example 20
def _package_versions(
        self,
        links,  # type: Iterable[Link]
        search  # type: Search
    ):
        # type: (...) -> List[Optional[InstallationCandidate]]
        result = []
        for link in self._sort_links(links):
            v = self._link_package_versions(link, search)
            if v is not None:
                result.append(v)
        return result 
Example 21
def _log_skipped_link(self, link, reason):
        # type: (Link, str) -> None
        if link not in self.logged_links:
            logger.debug('Skipping link %s; %s', link, reason)
            self.logged_links.add(link) 
Example 22
def is_vcs_url(link):
    # type: (Link) -> bool
    return bool(_get_used_vcs_backend(link)) 
Example 23
def is_file_url(link):
    # type: (Link) -> bool
    return link.url.lower().startswith('file:')
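
The check is just a case-insensitive prefix test on the link's URL string:

# Plain-string equivalent of the test above, with made-up URLs.
for url in ("file:///tmp/demo-1.0.tar.gz", "https://example.com/demo"):
    print(url, url.lower().startswith("file:"))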