# link.py
  1. from __future__ import annotations
  2. import functools
  3. import itertools
  4. import logging
  5. import os
  6. import posixpath
  7. import re
  8. import urllib.parse
  9. from collections.abc import Mapping
  10. from dataclasses import dataclass
  11. from typing import (
  12. TYPE_CHECKING,
  13. Any,
  14. NamedTuple,
  15. )
  16. from pip._internal.utils.deprecation import deprecated
  17. from pip._internal.utils.filetypes import WHEEL_EXTENSION
  18. from pip._internal.utils.hashes import Hashes
  19. from pip._internal.utils.misc import (
  20. pairwise,
  21. redact_auth_from_url,
  22. split_auth_from_netloc,
  23. splitext,
  24. )
  25. from pip._internal.utils.urls import path_to_url, url_to_path
  26. if TYPE_CHECKING:
  27. from pip._internal.index.collector import IndexContent
  28. logger = logging.getLogger(__name__)
  29. # Order matters, earlier hashes have a precedence over later hashes for what
  30. # we will pick to use.
  31. _SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
  32. @dataclass(frozen=True)
  33. class LinkHash:
  34. """Links to content may have embedded hash values. This class parses those.
  35. `name` must be any member of `_SUPPORTED_HASHES`.
  36. This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to
  37. be JSON-serializable to conform to PEP 610, this class contains the logic for
  38. parsing a hash name and value for correctness, and then checking whether that hash
  39. conforms to a schema with `.is_hash_allowed()`."""
  40. name: str
  41. value: str
  42. _hash_url_fragment_re = re.compile(
  43. # NB: we do not validate that the second group (.*) is a valid hex
  44. # digest. Instead, we simply keep that string in this class, and then check it
  45. # against Hashes when hash-checking is needed. This is easier to debug than
  46. # proactively discarding an invalid hex digest, as we handle incorrect hashes
  47. # and malformed hashes in the same place.
  48. r"[#&]({choices})=([^&]*)".format(
  49. choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
  50. ),
  51. )
  52. def __post_init__(self) -> None:
  53. assert self.name in _SUPPORTED_HASHES
  54. @classmethod
  55. @functools.cache
  56. def find_hash_url_fragment(cls, url: str) -> LinkHash | None:
  57. """Search a string for a checksum algorithm name and encoded output value."""
  58. match = cls._hash_url_fragment_re.search(url)
  59. if match is None:
  60. return None
  61. name, value = match.groups()
  62. return cls(name=name, value=value)
  63. def as_dict(self) -> dict[str, str]:
  64. return {self.name: self.value}
  65. def as_hashes(self) -> Hashes:
  66. """Return a Hashes instance which checks only for the current hash."""
  67. return Hashes({self.name: [self.value]})
  68. def is_hash_allowed(self, hashes: Hashes | None) -> bool:
  69. """
  70. Return True if the current hash is allowed by `hashes`.
  71. """
  72. if hashes is None:
  73. return False
  74. return hashes.is_hash_allowed(self.name, hex_digest=self.value)
  75. @dataclass(frozen=True)
  76. class MetadataFile:
  77. """Information about a core metadata file associated with a distribution."""
  78. hashes: dict[str, str] | None
  79. def __post_init__(self) -> None:
  80. if self.hashes is not None:
  81. assert all(name in _SUPPORTED_HASHES for name in self.hashes)
  82. def supported_hashes(hashes: dict[str, str] | None) -> dict[str, str] | None:
  83. # Remove any unsupported hash types from the mapping. If this leaves no
  84. # supported hashes, return None
  85. if hashes is None:
  86. return None
  87. hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES}
  88. if not hashes:
  89. return None
  90. return hashes
  91. def _clean_url_path_part(part: str) -> str:
  92. """
  93. Clean a "part" of a URL path (i.e. after splitting on "@" characters).
  94. """
  95. # We unquote prior to quoting to make sure nothing is double quoted.
  96. return urllib.parse.quote(urllib.parse.unquote(part))
  97. def _clean_file_url_path(part: str) -> str:
  98. """
  99. Clean the first part of a URL path that corresponds to a local
  100. filesystem path (i.e. the first part after splitting on "@" characters).
  101. """
  102. # We unquote prior to quoting to make sure nothing is double quoted.
  103. # Also, on Windows the path part might contain a drive letter which
  104. # should not be quoted. On Linux where drive letters do not
  105. # exist, the colon should be quoted. We rely on urllib.request
  106. # to do the right thing here.
  107. ret = urllib.request.pathname2url(urllib.request.url2pathname(part))
  108. if ret.startswith("///"):
  109. # Remove any URL authority section, leaving only the URL path.
  110. ret = ret.removeprefix("//")
  111. return ret
  112. # percent-encoded: /
  113. _reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
  114. def _clean_url_path(path: str, is_local_path: bool) -> str:
  115. """
  116. Clean the path portion of a URL.
  117. """
  118. if is_local_path:
  119. clean_func = _clean_file_url_path
  120. else:
  121. clean_func = _clean_url_path_part
  122. # Split on the reserved characters prior to cleaning so that
  123. # revision strings in VCS URLs are properly preserved.
  124. parts = _reserved_chars_re.split(path)
  125. cleaned_parts = []
  126. for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
  127. cleaned_parts.append(clean_func(to_clean))
  128. # Normalize %xx escapes (e.g. %2f -> %2F)
  129. cleaned_parts.append(reserved.upper())
  130. return "".join(cleaned_parts)
  131. def _ensure_quoted_url(url: str) -> str:
  132. """
  133. Make sure a link is fully quoted.
  134. For example, if ' ' occurs in the URL, it will be replaced with "%20",
  135. and without double-quoting other characters.
  136. """
  137. # Split the URL into parts according to the general structure
  138. # `scheme://netloc/path?query#fragment`.
  139. result = urllib.parse.urlsplit(url)
  140. # If the netloc is empty, then the URL refers to a local filesystem path.
  141. is_local_path = not result.netloc
  142. path = _clean_url_path(result.path, is_local_path=is_local_path)
  143. # Temporarily replace scheme with file to ensure the URL generated by
  144. # urlunsplit() contains an empty netloc (file://) as per RFC 1738.
  145. ret = urllib.parse.urlunsplit(result._replace(scheme="file", path=path))
  146. ret = result.scheme + ret[4:] # Restore original scheme.
  147. return ret
  148. def _absolute_link_url(base_url: str, url: str) -> str:
  149. """
  150. A faster implementation of urllib.parse.urljoin with a shortcut
  151. for absolute http/https URLs.
  152. """
  153. if url.startswith(("https://", "http://")):
  154. return url
  155. else:
  156. return urllib.parse.urljoin(base_url, url)
@functools.total_ordering
class Link:
    """Represents a parsed link from a Package Index's simple URL"""

    # __slots__ keeps per-instance memory low; a single index page can yield
    # a large number of Link objects.
    __slots__ = [
        "_parsed_url",
        "_url",
        "_path",
        "_hashes",
        "comes_from",
        "requires_python",
        "yanked_reason",
        "metadata_file_data",
        "cache_link_parsing",
        "egg_fragment",
    ]

    def __init__(
        self,
        url: str,
        comes_from: str | IndexContent | None = None,
        requires_python: str | None = None,
        yanked_reason: str | None = None,
        metadata_file_data: MetadataFile | None = None,
        cache_link_parsing: bool = True,
        hashes: Mapping[str, str] | None = None,
    ) -> None:
        """
        :param url: url of the resource pointed to (href of the link)
        :param comes_from: instance of IndexContent where the link was found,
            or string.
        :param requires_python: String containing the `Requires-Python`
            metadata field, specified in PEP 345. This may be specified by
            a data-requires-python attribute in the HTML link tag, as
            described in PEP 503.
        :param yanked_reason: the reason the file has been yanked, if the
            file has been yanked, or None if the file hasn't been yanked.
            This is the value of the "data-yanked" attribute, if present, in
            a simple repository HTML link. If the file has been yanked but
            no reason was provided, this should be the empty string. See
            PEP 592 for more information and the specification.
        :param metadata_file_data: the metadata attached to the file, or None if
            no such metadata is provided. This argument, if not None, indicates
            that a separate metadata file exists, and also optionally supplies
            hashes for that file.
        :param cache_link_parsing: A flag that is used elsewhere to determine
            whether resources retrieved from this link should be cached. PyPI
            URLs should generally have this set to False, for example.
        :param hashes: A mapping of hash names to digests to allow us to
            determine the validity of a download.
        """

        # The comes_from, requires_python, and metadata_file_data arguments are
        # only used by classmethods of this class, and are not used in client
        # code directly.

        # url can be a UNC windows share
        if url.startswith("\\\\"):
            url = path_to_url(url)

        self._parsed_url = urllib.parse.urlsplit(url)
        # Store the url as a private attribute to prevent accidentally
        # trying to set a new value.
        self._url = url
        # The .path property is hot, so calculate its value ahead of time.
        self._path = urllib.parse.unquote(self._parsed_url.path)

        link_hash = LinkHash.find_hash_url_fragment(url)
        hashes_from_link = {} if link_hash is None else link_hash.as_dict()
        if hashes is None:
            self._hashes = hashes_from_link
        else:
            # A hash embedded in the URL fragment overrides one supplied via
            # `hashes` for the same algorithm (later keys win in dict merge).
            self._hashes = {**hashes, **hashes_from_link}

        self.comes_from = comes_from
        # Normalize empty string to None.
        self.requires_python = requires_python if requires_python else None
        self.yanked_reason = yanked_reason
        self.metadata_file_data = metadata_file_data

        self.cache_link_parsing = cache_link_parsing
        # Computed once here; the fragment regex search is not repeated later.
        self.egg_fragment = self._egg_fragment()

    @classmethod
    def from_json(
        cls,
        file_data: dict[str, Any],
        page_url: str,
    ) -> Link | None:
        """
        Convert a PyPI JSON document from a simple repository page into a Link.
        """
        file_url = file_data.get("url")
        if file_url is None:
            return None

        url = _ensure_quoted_url(_absolute_link_url(page_url, file_url))
        pyrequire = file_data.get("requires-python")
        yanked_reason = file_data.get("yanked")
        hashes = file_data.get("hashes", {})

        # PEP 714: Indexes must use the name core-metadata, but
        # clients should support the old name as a fallback for compatibility.
        metadata_info = file_data.get("core-metadata")
        if metadata_info is None:
            metadata_info = file_data.get("dist-info-metadata")

        # The metadata info value may be a boolean, or a dict of hashes.
        if isinstance(metadata_info, dict):
            # The file exists, and hashes have been supplied
            metadata_file_data = MetadataFile(supported_hashes(metadata_info))
        elif metadata_info:
            # The file exists, but there are no hashes
            metadata_file_data = MetadataFile(None)
        else:
            # False or not present: the file does not exist
            metadata_file_data = None

        # The Link.yanked_reason expects an empty string instead of a boolean.
        if yanked_reason and not isinstance(yanked_reason, str):
            yanked_reason = ""
        # The Link.yanked_reason expects None instead of False.
        elif not yanked_reason:
            yanked_reason = None

        return cls(
            url,
            comes_from=page_url,
            requires_python=pyrequire,
            yanked_reason=yanked_reason,
            hashes=hashes,
            metadata_file_data=metadata_file_data,
        )

    @classmethod
    def from_element(
        cls,
        anchor_attribs: dict[str, str | None],
        page_url: str,
        base_url: str,
    ) -> Link | None:
        """
        Convert an anchor element's attributes in a simple repository page to a Link.
        """
        href = anchor_attribs.get("href")
        if not href:
            return None

        url = _ensure_quoted_url(_absolute_link_url(base_url, href))
        pyrequire = anchor_attribs.get("data-requires-python")
        yanked_reason = anchor_attribs.get("data-yanked")

        # PEP 714: Indexes must use the name data-core-metadata, but
        # clients should support the old name as a fallback for compatibility.
        metadata_info = anchor_attribs.get("data-core-metadata")
        if metadata_info is None:
            metadata_info = anchor_attribs.get("data-dist-info-metadata")
        # The metadata info value may be the string "true", or a string of
        # the form "hashname=hashval"
        if metadata_info == "true":
            # The file exists, but there are no hashes
            metadata_file_data = MetadataFile(None)
        elif metadata_info is None:
            # The file does not exist
            metadata_file_data = None
        else:
            # The file exists, and hashes have been supplied
            hashname, sep, hashval = metadata_info.partition("=")
            if sep == "=":
                metadata_file_data = MetadataFile(supported_hashes({hashname: hashval}))
            else:
                # Error - data is wrong. Treat as no hashes supplied.
                logger.debug(
                    "Index returned invalid data-dist-info-metadata value: %s",
                    metadata_info,
                )
                metadata_file_data = MetadataFile(None)

        return cls(
            url,
            comes_from=page_url,
            requires_python=pyrequire,
            yanked_reason=yanked_reason,
            metadata_file_data=metadata_file_data,
        )

    def __str__(self) -> str:
        if self.requires_python:
            rp = f" (requires-python:{self.requires_python})"
        else:
            rp = ""
        if self.comes_from:
            return f"{self.redacted_url} (from {self.comes_from}){rp}"
        else:
            return self.redacted_url

    def __repr__(self) -> str:
        return f"<Link {self}>"

    # Links hash and compare by their full URL string; the remaining ordering
    # methods are filled in by @functools.total_ordering from __eq__/__lt__.
    def __hash__(self) -> int:
        return hash(self.url)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __lt__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url < other.url

    @property
    def url(self) -> str:
        return self._url

    @property
    def redacted_url(self) -> str:
        # URL with any embedded credentials redacted; safe for display/logging.
        return redact_auth_from_url(self.url)

    @property
    def filename(self) -> str:
        path = self.path.rstrip("/")
        name = posixpath.basename(path)
        if not name:
            # Make sure we don't leak auth information if the netloc
            # includes a username and password.
            netloc, user_pass = split_auth_from_netloc(self.netloc)
            return netloc

        name = urllib.parse.unquote(name)
        assert name, f"URL {self._url!r} produced no filename"
        return name

    @property
    def file_path(self) -> str:
        # Local filesystem path; only meaningful for file: URLs.
        return url_to_path(self.url)

    @property
    def scheme(self) -> str:
        return self._parsed_url.scheme

    @property
    def netloc(self) -> str:
        """
        This can contain auth information.
        """
        return self._parsed_url.netloc

    @property
    def path(self) -> str:
        return self._path

    def splitext(self) -> tuple[str, str]:
        # (stem, extension) of the basename, e.g. ("pip-1.0", ".tar.gz").
        return splitext(posixpath.basename(self.path.rstrip("/")))

    @property
    def ext(self) -> str:
        return self.splitext()[1]

    @property
    def url_without_fragment(self) -> str:
        scheme, netloc, path, query, fragment = self._parsed_url
        return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))

    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")

    # Per PEP 508.
    _project_name_re = re.compile(
        r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
    )

    def _egg_fragment(self) -> str | None:
        match = self._egg_fragment_re.search(self._url)
        if not match:
            return None

        # An egg fragment looks like a PEP 508 project name, along with
        # an optional extras specifier. Anything else is invalid.
        project_name = match.group(1)
        if not self._project_name_re.match(project_name):
            # Non-conforming names still work for now, but emit a deprecation
            # warning; support is scheduled for removal (see gone_in below).
            deprecated(
                reason=f"{self} contains an egg fragment with a non-PEP 508 name.",
                replacement="to use the req @ url syntax, and remove the egg fragment",
                gone_in="25.3",
                issue=13157,
            )

        return project_name

    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")

    @property
    def subdirectory_fragment(self) -> str | None:
        match = self._subdirectory_fragment_re.search(self._url)
        if not match:
            return None
        return match.group(1)

    def metadata_link(self) -> Link | None:
        """Return a link to the associated core metadata file (if any)."""
        if self.metadata_file_data is None:
            return None
        metadata_url = f"{self.url_without_fragment}.metadata"
        if self.metadata_file_data.hashes is None:
            return Link(metadata_url)
        return Link(metadata_url, hashes=self.metadata_file_data.hashes)

    def as_hashes(self) -> Hashes:
        return Hashes({k: [v] for k, v in self._hashes.items()})

    @property
    def hash(self) -> str | None:
        # First hash value in self._hashes (dict insertion order), or None.
        return next(iter(self._hashes.values()), None)

    @property
    def hash_name(self) -> str | None:
        # Name of the first hash in self._hashes, or None.
        return next(iter(self._hashes), None)

    @property
    def show_url(self) -> str:
        # Basename of the URL with query string and fragment stripped.
        return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])

    @property
    def is_file(self) -> bool:
        return self.scheme == "file"

    def is_existing_dir(self) -> bool:
        return self.is_file and os.path.isdir(self.file_path)

    @property
    def is_wheel(self) -> bool:
        return self.ext == WHEEL_EXTENSION

    @property
    def is_vcs(self) -> bool:
        # NOTE(review): function-scope import — presumably to avoid a
        # circular import with the vcs package; confirm before moving it.
        from pip._internal.vcs import vcs

        return self.scheme in vcs.all_schemes

    @property
    def is_yanked(self) -> bool:
        return self.yanked_reason is not None

    @property
    def has_hash(self) -> bool:
        return bool(self._hashes)

    def is_hash_allowed(self, hashes: Hashes | None) -> bool:
        """
        Return True if the link has a hash and it is allowed by `hashes`.
        """
        if hashes is None:
            return False
        return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())
class _CleanResult(NamedTuple):
    """Convert link for equivalency check.

    This is used in the resolver to check whether two URL-specified requirements
    likely point to the same distribution and can be considered equivalent. This
    equivalency logic avoids comparing URLs literally, which can be too strict
    (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpected to users.

    Currently this does three things:

    1. Drop the basic auth part. This is technically wrong since a server can
       serve different content based on auth, but if it does that, it is even
       impossible to guarantee two URLs without auth are equivalent, since
       the user can input different auth information when prompted. So the
       practical solution is to assume the auth doesn't affect the response.
    2. Parse the query to avoid the ordering issue. Note that ordering under the
       same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
       still considered different.
    3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
       hash values, since it should have no impact on the downloaded content.
       Note that this drops the "egg=" part historically used to denote the
       requested project (and extras), which is wrong in the strictest sense,
       but too many people are supplying it inconsistently to cause superfluous
       resolution conflicts, so we choose to also ignore them.
    """

    # Split URL with auth, query and fragment already stripped.
    parsed: urllib.parse.SplitResult
    # Parsed query string (order-insensitive across keys).
    query: dict[str, list[str]]
    # First "subdirectory=" fragment value, or "" when absent.
    subdirectory: str
    # Supported hash name -> value pairs pulled from the fragment.
    hashes: dict[str, str]
  483. def _clean_link(link: Link) -> _CleanResult:
  484. parsed = link._parsed_url
  485. netloc = parsed.netloc.rsplit("@", 1)[-1]
  486. # According to RFC 8089, an empty host in file: means localhost.
  487. if parsed.scheme == "file" and not netloc:
  488. netloc = "localhost"
  489. fragment = urllib.parse.parse_qs(parsed.fragment)
  490. if "egg" in fragment:
  491. logger.debug("Ignoring egg= fragment in %s", link)
  492. try:
  493. # If there are multiple subdirectory values, use the first one.
  494. # This matches the behavior of Link.subdirectory_fragment.
  495. subdirectory = fragment["subdirectory"][0]
  496. except (IndexError, KeyError):
  497. subdirectory = ""
  498. # If there are multiple hash values under the same algorithm, use the
  499. # first one. This matches the behavior of Link.hash_value.
  500. hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
  501. return _CleanResult(
  502. parsed=parsed._replace(netloc=netloc, query="", fragment=""),
  503. query=urllib.parse.parse_qs(parsed.query),
  504. subdirectory=subdirectory,
  505. hashes=hashes,
  506. )
  507. @functools.cache
  508. def links_equivalent(link1: Link, link2: Link) -> bool:
  509. return _clean_link(link1) == _clean_link(link2)