You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3826 lines
115KB

  1. import warnings
  2. from collections import Counter, defaultdict, deque, abc
  3. from collections.abc import Sequence
  4. from concurrent.futures import ThreadPoolExecutor
  5. from functools import partial, reduce, wraps
  6. from heapq import merge, heapify, heapreplace, heappop
  7. from itertools import (
  8. chain,
  9. compress,
  10. count,
  11. cycle,
  12. dropwhile,
  13. groupby,
  14. islice,
  15. repeat,
  16. starmap,
  17. takewhile,
  18. tee,
  19. zip_longest,
  20. )
  21. from math import exp, factorial, floor, log
  22. from queue import Empty, Queue
  23. from random import random, randrange, uniform
  24. from operator import itemgetter, mul, sub, gt, lt
  25. from sys import hexversion, maxsize
  26. from time import monotonic
  27. from .recipes import (
  28. consume,
  29. flatten,
  30. pairwise,
  31. powerset,
  32. take,
  33. unique_everseen,
  34. )
# Public names exported by this module.  Star-imports and the documentation
# build rely on this list; entries are kept in their historical order.
__all__ = [
    'AbortThread',
    'adjacent',
    'always_iterable',
    'always_reversible',
    'bucket',
    'callback_iter',
    'chunked',
    'circular_shifts',
    'collapse',
    'collate',
    'consecutive_groups',
    'consumer',
    'countable',
    'count_cycle',
    'mark_ends',
    'difference',
    'distinct_combinations',
    'distinct_permutations',
    'distribute',
    'divide',
    'exactly_n',
    'filter_except',
    'first',
    'groupby_transform',
    'ilen',
    'interleave_longest',
    'interleave',
    'intersperse',
    'islice_extended',
    'iterate',
    'ichunked',
    'is_sorted',
    'last',
    'locate',
    'lstrip',
    'make_decorator',
    'map_except',
    'map_reduce',
    'nth_or_last',
    'nth_permutation',
    'nth_product',
    'numeric_range',
    'one',
    'only',
    'padded',
    'partitions',
    'set_partitions',
    'peekable',
    'repeat_last',
    'replace',
    'rlocate',
    'rstrip',
    'run_length',
    'sample',
    'seekable',
    'SequenceView',
    'side_effect',
    'sliced',
    'sort_together',
    'split_at',
    'split_after',
    'split_before',
    'split_when',
    'split_into',
    'spy',
    'stagger',
    'strip',
    'substrings',
    'substrings_indexes',
    'time_limited',
    'unique_to_each',
    'unzip',
    'windowed',
    'with_iter',
    'UnequalIterablesError',
    'zip_equal',
    'zip_offset',
    'windowed_complete',
    'all_unique',
    'value_chain',
    'product_index',
    'combination_index',
    'permutation_index',
]

# Module-wide sentinel used to tell "no argument supplied" apart from
# legitimate argument values such as ``None``.
_marker = object()
  121. def chunked(iterable, n, strict=False):
  122. """Break *iterable* into lists of length *n*:
  123. >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
  124. [[1, 2, 3], [4, 5, 6]]
  125. By the default, the last yielded list will have fewer than *n* elements
  126. if the length of *iterable* is not divisible by *n*:
  127. >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
  128. [[1, 2, 3], [4, 5, 6], [7, 8]]
  129. To use a fill-in value instead, see the :func:`grouper` recipe.
  130. If the length of *iterable* is not divisible by *n* and *strict* is
  131. ``True``, then ``ValueError`` will be raised before the last
  132. list is yielded.
  133. """
  134. iterator = iter(partial(take, n, iter(iterable)), [])
  135. if strict:
  136. def ret():
  137. for chunk in iterator:
  138. if len(chunk) != n:
  139. raise ValueError('iterable is not divisible by n.')
  140. yield chunk
  141. return iter(ret())
  142. else:
  143. return iterator
  144. def first(iterable, default=_marker):
  145. """Return the first item of *iterable*, or *default* if *iterable* is
  146. empty.
  147. >>> first([0, 1, 2, 3])
  148. 0
  149. >>> first([], 'some default')
  150. 'some default'
  151. If *default* is not provided and there are no items in the iterable,
  152. raise ``ValueError``.
  153. :func:`first` is useful when you have a generator of expensive-to-retrieve
  154. values and want any arbitrary one. It is marginally shorter than
  155. ``next(iter(iterable), default)``.
  156. """
  157. try:
  158. return next(iter(iterable))
  159. except StopIteration as e:
  160. if default is _marker:
  161. raise ValueError(
  162. 'first() was called on an empty iterable, and no '
  163. 'default value was provided.'
  164. ) from e
  165. return default
  166. def last(iterable, default=_marker):
  167. """Return the last item of *iterable*, or *default* if *iterable* is
  168. empty.
  169. >>> last([0, 1, 2, 3])
  170. 3
  171. >>> last([], 'some default')
  172. 'some default'
  173. If *default* is not provided and there are no items in the iterable,
  174. raise ``ValueError``.
  175. """
  176. try:
  177. if isinstance(iterable, Sequence):
  178. return iterable[-1]
  179. # Work around https://bugs.python.org/issue38525
  180. elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
  181. return next(reversed(iterable))
  182. else:
  183. return deque(iterable, maxlen=1)[-1]
  184. except (IndexError, TypeError, StopIteration):
  185. if default is _marker:
  186. raise ValueError(
  187. 'last() was called on an empty iterable, and no default was '
  188. 'provided.'
  189. )
  190. return default
  191. def nth_or_last(iterable, n, default=_marker):
  192. """Return the nth or the last item of *iterable*,
  193. or *default* if *iterable* is empty.
  194. >>> nth_or_last([0, 1, 2, 3], 2)
  195. 2
  196. >>> nth_or_last([0, 1], 2)
  197. 1
  198. >>> nth_or_last([], 0, 'some default')
  199. 'some default'
  200. If *default* is not provided and there are no items in the iterable,
  201. raise ``ValueError``.
  202. """
  203. return last(islice(iterable, n + 1), default=default)
class peekable:
    """Wrap an iterator to allow lookahead and prepending elements.

    Call :meth:`peek` on the result to get the value that will be returned
    by :func:`next`. This won't advance the iterator:

    >>> p = peekable(['a', 'b'])
    >>> p.peek()
    'a'
    >>> next(p)
    'a'

    Pass :meth:`peek` a default value to return that instead of raising
    ``StopIteration`` when the iterator is exhausted.

    >>> p = peekable([])
    >>> p.peek('hi')
    'hi'

    peekables also offer a :meth:`prepend` method, which "inserts" items
    at the head of the iterable:

    >>> p = peekable([1, 2, 3])
    >>> p.prepend(10, 11, 12)
    >>> next(p)
    10
    >>> p.peek()
    11
    >>> list(p)
    [11, 12, 1, 2, 3]

    peekables can be indexed. Index 0 is the item that will be returned by
    :func:`next`, index 1 is the item after that, and so on:
    The values up to the given index will be cached.

    >>> p = peekable(['a', 'b', 'c', 'd'])
    >>> p[0]
    'a'
    >>> p[1]
    'b'
    >>> next(p)
    'a'

    Negative indexes are supported, but be aware that they will cache the
    remaining items in the source iterator, which may require significant
    storage.

    To check whether a peekable is exhausted, check its truth value:

    >>> p = peekable(['a', 'b'])
    >>> if p:  # peekable has items
    ...     list(p)
    ['a', 'b']
    >>> if not p:  # peekable is exhausted
    ...     list(p)
    []
    """

    def __init__(self, iterable):
        self._it = iter(iterable)
        # Items that have been peeked at, prepended, or cached for indexing
        # but not yet consumed; the left end is the next item to be emitted.
        self._cache = deque()

    def __iter__(self):
        return self

    def __bool__(self):
        # Truthy iff at least one more item can be produced.
        try:
            self.peek()
        except StopIteration:
            return False
        return True

    def peek(self, default=_marker):
        """Return the item that will be next returned from ``next()``.

        Return ``default`` if there are no items left. If ``default`` is not
        provided, raise ``StopIteration``.
        """
        # Pull one item from the source into the cache if needed; peeking
        # must not consume it.
        if not self._cache:
            try:
                self._cache.append(next(self._it))
            except StopIteration:
                if default is _marker:
                    raise
                return default
        return self._cache[0]

    def prepend(self, *items):
        """Stack up items to be the next ones returned from ``next()`` or
        ``self.peek()``. The items will be returned in
        first in, first out order::

        >>> p = peekable([1, 2, 3])
        >>> p.prepend(10, 11, 12)
        >>> next(p)
        10
        >>> list(p)
        [11, 12, 1, 2, 3]

        It is possible, by prepending items, to "resurrect" a peekable that
        previously raised ``StopIteration``.

        >>> p = peekable([])
        >>> next(p)
        Traceback (most recent call last):
        ...
        StopIteration
        >>> p.prepend(1)
        >>> next(p)
        1
        >>> next(p)
        Traceback (most recent call last):
        ...
        StopIteration
        """
        # extendleft() reverses its argument, so reversing first preserves
        # the caller's ordering at the head of the cache.
        self._cache.extendleft(reversed(items))

    def __next__(self):
        # Serve cached/prepended items before drawing from the source.
        if self._cache:
            return self._cache.popleft()
        return next(self._it)

    def _get_slice(self, index):
        # Normalize the slice's arguments
        step = 1 if (index.step is None) else index.step
        if step > 0:
            start = 0 if (index.start is None) else index.start
            stop = maxsize if (index.stop is None) else index.stop
        elif step < 0:
            start = -1 if (index.start is None) else index.start
            stop = (-maxsize - 1) if (index.stop is None) else index.stop
        else:
            raise ValueError('slice step cannot be zero')

        # If either the start or stop index is negative, we'll need to cache
        # the rest of the iterable in order to slice from the right side.
        if (start < 0) or (stop < 0):
            self._cache.extend(self._it)
        # Otherwise we'll need to find the rightmost index and cache to that
        # point.
        else:
            n = min(max(start, stop) + 1, maxsize)
            cache_len = len(self._cache)
            if n >= cache_len:
                self._cache.extend(islice(self._it, n - cache_len))

        # Slice the materialized cache; the cached items remain un-consumed.
        return list(self._cache)[index]

    def __getitem__(self, index):
        if isinstance(index, slice):
            return self._get_slice(index)

        cache_len = len(self._cache)
        # A negative index needs the full remaining length, so cache
        # everything; otherwise cache only up to the requested position.
        if index < 0:
            self._cache.extend(self._it)
        elif index >= cache_len:
            self._cache.extend(islice(self._it, index + 1 - cache_len))

        return self._cache[index]
  336. def collate(*iterables, **kwargs):
  337. """Return a sorted merge of the items from each of several already-sorted
  338. *iterables*.
  339. >>> list(collate('ACDZ', 'AZ', 'JKL'))
  340. ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
  341. Works lazily, keeping only the next value from each iterable in memory. Use
  342. :func:`collate` to, for example, perform a n-way mergesort of items that
  343. don't fit in memory.
  344. If a *key* function is specified, the iterables will be sorted according
  345. to its result:
  346. >>> key = lambda s: int(s) # Sort by numeric value, not by string
  347. >>> list(collate(['1', '10'], ['2', '11'], key=key))
  348. ['1', '2', '10', '11']
  349. If the *iterables* are sorted in descending order, set *reverse* to
  350. ``True``:
  351. >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
  352. [5, 4, 3, 2, 1, 0]
  353. If the elements of the passed-in iterables are out of order, you might get
  354. unexpected results.
  355. On Python 3.5+, this function is an alias for :func:`heapq.merge`.
  356. """
  357. warnings.warn(
  358. "collate is no longer part of more_itertools, use heapq.merge",
  359. DeprecationWarning,
  360. )
  361. return merge(*iterables, **kwargs)
  362. def consumer(func):
  363. """Decorator that automatically advances a PEP-342-style "reverse iterator"
  364. to its first yield point so you don't have to call ``next()`` on it
  365. manually.
  366. >>> @consumer
  367. ... def tally():
  368. ... i = 0
  369. ... while True:
  370. ... print('Thing number %s is %s.' % (i, (yield)))
  371. ... i += 1
  372. ...
  373. >>> t = tally()
  374. >>> t.send('red')
  375. Thing number 0 is red.
  376. >>> t.send('fish')
  377. Thing number 1 is fish.
  378. Without the decorator, you would have to call ``next(t)`` before
  379. ``t.send()`` could be used.
  380. """
  381. @wraps(func)
  382. def wrapper(*args, **kwargs):
  383. gen = func(*args, **kwargs)
  384. next(gen)
  385. return gen
  386. return wrapper
  387. def ilen(iterable):
  388. """Return the number of items in *iterable*.
  389. >>> ilen(x for x in range(1000000) if x % 3 == 0)
  390. 333334
  391. This consumes the iterable, so handle with care.
  392. """
  393. # This approach was selected because benchmarks showed it's likely the
  394. # fastest of the known implementations at the time of writing.
  395. # See GitHub tracker: #236, #230.
  396. counter = count()
  397. deque(zip(iterable, counter), maxlen=0)
  398. return next(counter)
  399. def iterate(func, start):
  400. """Return ``start``, ``func(start)``, ``func(func(start))``, ...
  401. >>> from itertools import islice
  402. >>> list(islice(iterate(lambda x: 2*x, 1), 10))
  403. [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
  404. """
  405. while True:
  406. yield start
  407. start = func(start)
  408. def with_iter(context_manager):
  409. """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
  410. For example, this will close the file when the iterator is exhausted::
  411. upper_lines = (line.upper() for line in with_iter(open('foo')))
  412. Any context manager which returns an iterable is a candidate for
  413. ``with_iter``.
  414. """
  415. with context_manager as iterable:
  416. yield from iterable
  417. def one(iterable, too_short=None, too_long=None):
  418. """Return the first item from *iterable*, which is expected to contain only
  419. that item. Raise an exception if *iterable* is empty or has more than one
  420. item.
  421. :func:`one` is useful for ensuring that an iterable contains only one item.
  422. For example, it can be used to retrieve the result of a database query
  423. that is expected to return a single row.
  424. If *iterable* is empty, ``ValueError`` will be raised. You may specify a
  425. different exception with the *too_short* keyword:
  426. >>> it = []
  427. >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
  428. Traceback (most recent call last):
  429. ...
  430. ValueError: too many items in iterable (expected 1)'
  431. >>> too_short = IndexError('too few items')
  432. >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
  433. Traceback (most recent call last):
  434. ...
  435. IndexError: too few items
  436. Similarly, if *iterable* contains more than one item, ``ValueError`` will
  437. be raised. You may specify a different exception with the *too_long*
  438. keyword:
  439. >>> it = ['too', 'many']
  440. >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
  441. Traceback (most recent call last):
  442. ...
  443. ValueError: Expected exactly one item in iterable, but got 'too',
  444. 'many', and perhaps more.
  445. >>> too_long = RuntimeError
  446. >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
  447. Traceback (most recent call last):
  448. ...
  449. RuntimeError
  450. Note that :func:`one` attempts to advance *iterable* twice to ensure there
  451. is only one item. See :func:`spy` or :func:`peekable` to check iterable
  452. contents less destructively.
  453. """
  454. it = iter(iterable)
  455. try:
  456. first_value = next(it)
  457. except StopIteration as e:
  458. raise (
  459. too_short or ValueError('too few items in iterable (expected 1)')
  460. ) from e
  461. try:
  462. second_value = next(it)
  463. except StopIteration:
  464. pass
  465. else:
  466. msg = (
  467. 'Expected exactly one item in iterable, but got {!r}, {!r}, '
  468. 'and perhaps more.'.format(first_value, second_value)
  469. )
  470. raise too_long or ValueError(msg)
  471. return first_value
def distinct_permutations(iterable, r=None):
    """Yield successive distinct permutations of the elements in *iterable*.

    >>> sorted(distinct_permutations([1, 0, 1]))
    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]

    Equivalent to ``set(permutations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.

    Duplicate permutations arise when there are duplicated elements in the
    input iterable. The number of items returned is
    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
    items input, and each `x_i` is the count of a distinct item in the input
    sequence.

    If *r* is given, only the *r*-length permutations are yielded.

    >>> sorted(distinct_permutations([1, 0, 1], r=2))
    [(0, 1), (1, 0), (1, 1)]
    >>> sorted(distinct_permutations(range(3), r=2))
    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
    """

    # Algorithm: https://w.wiki/Qai
    # Generates permutations of the full (sorted) sequence in lexicographic
    # order, in place, via the classic "next permutation" step.
    def _full(A):
        while True:
            # Yield the permutation we have
            yield tuple(A)

            # Find the largest index i such that A[i] < A[i + 1]
            for i in range(size - 2, -1, -1):
                if A[i] < A[i + 1]:
                    break
            # If no such index exists, this permutation is the last one
            else:
                return

            # Find the largest index j greater than i such that A[i] < A[j]
            for j in range(size - 1, i, -1):
                if A[i] < A[j]:
                    break

            # Swap the value of A[i] with that of A[j], then reverse the
            # sequence from A[i + 1] to form the new permutation
            A[i], A[j] = A[j], A[i]
            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]

    # Algorithm: modified from the above.  Yields only the first r items
    # (the "head") of each distinct arrangement, keeping the unused items
    # in a sorted "tail".
    def _partial(A, r):
        # Split A into the first r items and the last r items
        head, tail = A[:r], A[r:]
        right_head_indexes = range(r - 1, -1, -1)
        left_tail_indexes = range(len(tail))

        while True:
            # Yield the permutation we have
            yield tuple(head)

            # Starting from the right, find the first index of the head with
            # value smaller than the maximum value of the tail - call it i.
            pivot = tail[-1]
            for i in right_head_indexes:
                if head[i] < pivot:
                    break
                pivot = head[i]
            else:
                return

            # Starting from the left, find the first value of the tail
            # with a value greater than head[i] and swap.
            for j in left_tail_indexes:
                if tail[j] > head[i]:
                    head[i], tail[j] = tail[j], head[i]
                    break
            # If we didn't find one, start from the right and find the first
            # index of the head with a value greater than head[i] and swap.
            else:
                for j in right_head_indexes:
                    if head[j] > head[i]:
                        head[i], head[j] = head[j], head[i]
                        break

            # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
            tail += head[: i - r : -1]  # head[i + 1:][::-1]
            i += 1
            head[i:], tail[:] = tail[: r - i], tail[r - i :]

    # Sorting groups equal items together, which is what makes the
    # "next permutation" step skip duplicates.
    items = sorted(iterable)

    size = len(items)
    if r is None:
        r = size

    if 0 < r <= size:
        return _full(items) if (r == size) else _partial(items, r)

    # r == 0 yields a single empty permutation; r > size yields nothing.
    return iter(() if r else ((),))
  552. def intersperse(e, iterable, n=1):
  553. """Intersperse filler element *e* among the items in *iterable*, leaving
  554. *n* items between each filler element.
  555. >>> list(intersperse('!', [1, 2, 3, 4, 5]))
  556. [1, '!', 2, '!', 3, '!', 4, '!', 5]
  557. >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
  558. [1, 2, None, 3, 4, None, 5]
  559. """
  560. if n == 0:
  561. raise ValueError('n must be > 0')
  562. elif n == 1:
  563. # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2...
  564. # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2...
  565. return islice(interleave(repeat(e), iterable), 1, None)
  566. else:
  567. # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
  568. # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
  569. # flatten(...) -> x_0, x_1, e, x_2, x_3...
  570. filler = repeat([e])
  571. chunks = chunked(iterable, n)
  572. return flatten(islice(interleave(filler, chunks), 1, None))
  573. def unique_to_each(*iterables):
  574. """Return the elements from each of the input iterables that aren't in the
  575. other input iterables.
  576. For example, suppose you have a set of packages, each with a set of
  577. dependencies::
  578. {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
  579. If you remove one package, which dependencies can also be removed?
  580. If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
  581. associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
  582. ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
  583. >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
  584. [['A'], ['C'], ['D']]
  585. If there are duplicates in one input iterable that aren't in the others
  586. they will be duplicated in the output. Input order is preserved::
  587. >>> unique_to_each("mississippi", "missouri")
  588. [['p', 'p'], ['o', 'u', 'r']]
  589. It is assumed that the elements of each iterable are hashable.
  590. """
  591. pool = [list(it) for it in iterables]
  592. counts = Counter(chain.from_iterable(map(set, pool)))
  593. uniques = {element for element in counts if counts[element] == 1}
  594. return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
    """Return a sliding window of width *n* over the given iterable.

    >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
    >>> list(all_windows)
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    When the window is larger than the iterable, *fillvalue* is used in place
    of missing values:

    >>> list(windowed([1, 2, 3], 4))
    [(1, 2, 3, None)]

    Each window will advance in increments of *step*:

    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]

    To slide into the iterable's items, use :func:`chain` to add filler items
    to the left:

    >>> iterable = [1, 2, 3, 4]
    >>> n = 3
    >>> padding = [None] * (n - 1)
    >>> list(windowed(chain(padding, iterable), 3))
    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
    """
    if n < 0:
        raise ValueError('n must be >= 0')
    if n == 0:
        # A zero-width window: a single empty tuple.
        yield tuple()
        return
    if step < 1:
        raise ValueError('step must be >= 1')

    # The deque holds the current window; maxlen=n evicts from the left
    # automatically as new items are appended.
    window = deque(maxlen=n)
    # i counts down the appends remaining until the next emission: n items
    # to fill the first window, then step items between emissions.
    i = n
    for _ in map(window.append, seq):
        i -= 1
        if not i:
            i = step
            yield tuple(window)

    size = len(window)
    if size < n:
        # The input was shorter than one full window: pad on the right.
        yield tuple(chain(window, repeat(fillvalue, n - size)))
    elif 0 < i < min(step, n):
        # The final step ran partway past the end of the input: pad the
        # remaining i slots and emit one last window.
        window += (fillvalue,) * i
        yield tuple(window)
  635. def substrings(iterable):
  636. """Yield all of the substrings of *iterable*.
  637. >>> [''.join(s) for s in substrings('more')]
  638. ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
  639. Note that non-string iterables can also be subdivided.
  640. >>> list(substrings([0, 1, 2]))
  641. [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
  642. """
  643. # The length-1 substrings
  644. seq = []
  645. for item in iter(iterable):
  646. seq.append(item)
  647. yield (item,)
  648. seq = tuple(seq)
  649. item_count = len(seq)
  650. # And the rest
  651. for n in range(2, item_count + 1):
  652. for i in range(item_count - n + 1):
  653. yield seq[i : i + n]
  654. def substrings_indexes(seq, reverse=False):
  655. """Yield all substrings and their positions in *seq*
  656. The items yielded will be a tuple of the form ``(substr, i, j)``, where
  657. ``substr == seq[i:j]``.
  658. This function only works for iterables that support slicing, such as
  659. ``str`` objects.
  660. >>> for item in substrings_indexes('more'):
  661. ... print(item)
  662. ('m', 0, 1)
  663. ('o', 1, 2)
  664. ('r', 2, 3)
  665. ('e', 3, 4)
  666. ('mo', 0, 2)
  667. ('or', 1, 3)
  668. ('re', 2, 4)
  669. ('mor', 0, 3)
  670. ('ore', 1, 4)
  671. ('more', 0, 4)
  672. Set *reverse* to ``True`` to yield the same items in the opposite order.
  673. """
  674. r = range(1, len(seq) + 1)
  675. if reverse:
  676. r = reversed(r)
  677. return (
  678. (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
  679. )
class bucket:
    """Wrap *iterable* and return an object that buckets it iterable into
    child iterables based on a *key* function.

    >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
    >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
    >>> sorted(list(s))  # Get the keys
    ['a', 'b', 'c']
    >>> a_iterable = s['a']
    >>> next(a_iterable)
    'a1'
    >>> next(a_iterable)
    'a2'
    >>> list(s['b'])
    ['b1', 'b2', 'b3']

    The original iterable will be advanced and its items will be cached until
    they are used by the child iterables. This may require significant storage.

    By default, attempting to select a bucket to which no items belong will
    exhaust the iterable and cache all values.
    If you specify a *validator* function, selected buckets will instead be
    checked against it.

    >>> from itertools import count
    >>> it = count(1, 2)  # Infinite sequence of odd numbers
    >>> key = lambda x: x % 10  # Bucket by last digit
    >>> validator = lambda x: x in {1, 3, 5, 7, 9}  # Odd digits only
    >>> s = bucket(it, key=key, validator=validator)
    >>> 2 in s
    False
    >>> list(s[2])
    []
    """

    def __init__(self, iterable, key, validator=None):
        self._it = iter(iterable)
        self._key = key
        # Items already drawn from the source but not yet consumed, grouped
        # by their key value.
        self._cache = defaultdict(deque)
        # With no validator supplied, every key value is considered valid.
        self._validator = validator or (lambda x: True)

    def __contains__(self, value):
        if not self._validator(value):
            return False

        # Probe for one matching item; if found, push it back so membership
        # testing does not consume it.
        try:
            item = next(self[value])
        except StopIteration:
            return False
        else:
            self._cache[value].appendleft(item)

        return True

    def _get_values(self, value):
        """
        Helper to yield items from the parent iterator that match *value*.
        Items that don't match are stored in the local cache as they
        are encountered.
        """
        while True:
            # If we've cached some items that match the target value, emit
            # the first one and evict it from the cache.
            if self._cache[value]:
                yield self._cache[value].popleft()
            # Otherwise we need to advance the parent iterator to search for
            # a matching item, caching the rest.
            else:
                while True:
                    try:
                        item = next(self._it)
                    except StopIteration:
                        return
                    item_value = self._key(item)
                    if item_value == value:
                        yield item
                        break
                    elif self._validator(item_value):
                        # Cache only items whose key passes validation.
                        self._cache[item_value].append(item)

    def __iter__(self):
        # Iterating the bucket yields the distinct key values.  This drains
        # the source entirely, caching every (valid) item.
        for item in self._it:
            item_value = self._key(item)
            if self._validator(item_value):
                self._cache[item_value].append(item)

        yield from self._cache.keys()

    def __getitem__(self, value):
        # Invalid keys get an empty iterator without touching the source.
        if not self._validator(value):
            return iter(())

        return self._get_values(value)
  760. def spy(iterable, n=1):
  761. """Return a 2-tuple with a list containing the first *n* elements of
  762. *iterable*, and an iterator with the same items as *iterable*.
  763. This allows you to "look ahead" at the items in the iterable without
  764. advancing it.
  765. There is one item in the list by default:
  766. >>> iterable = 'abcdefg'
  767. >>> head, iterable = spy(iterable)
  768. >>> head
  769. ['a']
  770. >>> list(iterable)
  771. ['a', 'b', 'c', 'd', 'e', 'f', 'g']
  772. You may use unpacking to retrieve items instead of lists:
  773. >>> (head,), iterable = spy('abcdefg')
  774. >>> head
  775. 'a'
  776. >>> (first, second), iterable = spy('abcdefg', 2)
  777. >>> first
  778. 'a'
  779. >>> second
  780. 'b'
  781. The number of items requested can be larger than the number of items in
  782. the iterable:
  783. >>> iterable = [1, 2, 3, 4, 5]
  784. >>> head, iterable = spy(iterable, 10)
  785. >>> head
  786. [1, 2, 3, 4, 5]
  787. >>> list(iterable)
  788. [1, 2, 3, 4, 5]
  789. """
  790. it = iter(iterable)
  791. head = take(n, it)
  792. return head.copy(), chain(head, it)
  793. def interleave(*iterables):
  794. """Return a new iterable yielding from each iterable in turn,
  795. until the shortest is exhausted.
  796. >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
  797. [1, 4, 6, 2, 5, 7]
  798. For a version that doesn't terminate after the shortest iterable is
  799. exhausted, see :func:`interleave_longest`.
  800. """
  801. return chain.from_iterable(zip(*iterables))
  802. def interleave_longest(*iterables):
  803. """Return a new iterable yielding from each iterable in turn,
  804. skipping any that are exhausted.
  805. >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
  806. [1, 4, 6, 2, 5, 7, 3, 8]
  807. This function produces the same output as :func:`roundrobin`, but may
  808. perform better for some inputs (in particular when the number of iterables
  809. is large).
  810. """
  811. i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
  812. return (x for x in i if x is not _marker)
  813. def collapse(iterable, base_type=None, levels=None):
  814. """Flatten an iterable with multiple levels of nesting (e.g., a list of
  815. lists of tuples) into non-iterable types.
  816. >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
  817. >>> list(collapse(iterable))
  818. [1, 2, 3, 4, 5, 6]
  819. Binary and text strings are not considered iterable and
  820. will not be collapsed.
  821. To avoid collapsing other types, specify *base_type*:
  822. >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
  823. >>> list(collapse(iterable, base_type=tuple))
  824. ['ab', ('cd', 'ef'), 'gh', 'ij']
  825. Specify *levels* to stop flattening after a certain level:
  826. >>> iterable = [('a', ['b']), ('c', ['d'])]
  827. >>> list(collapse(iterable)) # Fully flattened
  828. ['a', 'b', 'c', 'd']
  829. >>> list(collapse(iterable, levels=1)) # Only one level flattened
  830. ['a', ['b'], 'c', ['d']]
  831. """
  832. def walk(node, level):
  833. if (
  834. ((levels is not None) and (level > levels))
  835. or isinstance(node, (str, bytes))
  836. or ((base_type is not None) and isinstance(node, base_type))
  837. ):
  838. yield node
  839. return
  840. try:
  841. tree = iter(node)
  842. except TypeError:
  843. yield node
  844. return
  845. else:
  846. for child in tree:
  847. yield from walk(child, level + 1)
  848. yield from walk(iterable, 0)
  849. def side_effect(func, iterable, chunk_size=None, before=None, after=None):
  850. """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
  851. of items) before yielding the item.
  852. `func` must be a function that takes a single argument. Its return value
  853. will be discarded.
  854. *before* and *after* are optional functions that take no arguments. They
  855. will be executed before iteration starts and after it ends, respectively.
  856. `side_effect` can be used for logging, updating progress bars, or anything
  857. that is not functionally "pure."
  858. Emitting a status message:
  859. >>> from more_itertools import consume
  860. >>> func = lambda item: print('Received {}'.format(item))
  861. >>> consume(side_effect(func, range(2)))
  862. Received 0
  863. Received 1
  864. Operating on chunks of items:
  865. >>> pair_sums = []
  866. >>> func = lambda chunk: pair_sums.append(sum(chunk))
  867. >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
  868. [0, 1, 2, 3, 4, 5]
  869. >>> list(pair_sums)
  870. [1, 5, 9]
  871. Writing to a file-like object:
  872. >>> from io import StringIO
  873. >>> from more_itertools import consume
  874. >>> f = StringIO()
  875. >>> func = lambda x: print(x, file=f)
  876. >>> before = lambda: print(u'HEADER', file=f)
  877. >>> after = f.close
  878. >>> it = [u'a', u'b', u'c']
  879. >>> consume(side_effect(func, it, before=before, after=after))
  880. >>> f.closed
  881. True
  882. """
  883. try:
  884. if before is not None:
  885. before()
  886. if chunk_size is None:
  887. for item in iterable:
  888. func(item)
  889. yield item
  890. else:
  891. for chunk in chunked(iterable, chunk_size):
  892. func(chunk)
  893. yield from chunk
  894. finally:
  895. if after is not None:
  896. after()
  897. def sliced(seq, n, strict=False):
  898. """Yield slices of length *n* from the sequence *seq*.
  899. >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
  900. [(1, 2, 3), (4, 5, 6)]
  901. By the default, the last yielded slice will have fewer than *n* elements
  902. if the length of *seq* is not divisible by *n*:
  903. >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
  904. [(1, 2, 3), (4, 5, 6), (7, 8)]
  905. If the length of *seq* is not divisible by *n* and *strict* is
  906. ``True``, then ``ValueError`` will be raised before the last
  907. slice is yielded.
  908. This function will only work for iterables that support slicing.
  909. For non-sliceable iterables, see :func:`chunked`.
  910. """
  911. iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
  912. if strict:
  913. def ret():
  914. for _slice in iterator:
  915. if len(_slice) != n:
  916. raise ValueError("seq is not divisible by n.")
  917. yield _slice
  918. return iter(ret())
  919. else:
  920. return iterator
  921. def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
  922. """Yield lists of items from *iterable*, where each list is delimited by
  923. an item where callable *pred* returns ``True``.
  924. >>> list(split_at('abcdcba', lambda x: x == 'b'))
  925. [['a'], ['c', 'd', 'c'], ['a']]
  926. >>> list(split_at(range(10), lambda n: n % 2 == 1))
  927. [[0], [2], [4], [6], [8], []]
  928. At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
  929. then there is no limit on the number of splits:
  930. >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
  931. [[0], [2], [4, 5, 6, 7, 8, 9]]
  932. By default, the delimiting items are not included in the output.
  933. The include them, set *keep_separator* to ``True``.
  934. >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
  935. [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
  936. """
  937. if maxsplit == 0:
  938. yield list(iterable)
  939. return
  940. buf = []
  941. it = iter(iterable)
  942. for item in it:
  943. if pred(item):
  944. yield buf
  945. if keep_separator:
  946. yield [item]
  947. if maxsplit == 1:
  948. yield list(it)
  949. return
  950. buf = []
  951. maxsplit -= 1
  952. else:
  953. buf.append(item)
  954. yield buf
  955. def split_before(iterable, pred, maxsplit=-1):
  956. """Yield lists of items from *iterable*, where each list ends just before
  957. an item for which callable *pred* returns ``True``:
  958. >>> list(split_before('OneTwo', lambda s: s.isupper()))
  959. [['O', 'n', 'e'], ['T', 'w', 'o']]
  960. >>> list(split_before(range(10), lambda n: n % 3 == 0))
  961. [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
  962. At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
  963. then there is no limit on the number of splits:
  964. >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
  965. [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
  966. """
  967. if maxsplit == 0:
  968. yield list(iterable)
  969. return
  970. buf = []
  971. it = iter(iterable)
  972. for item in it:
  973. if pred(item) and buf:
  974. yield buf
  975. if maxsplit == 1:
  976. yield [item] + list(it)
  977. return
  978. buf = []
  979. maxsplit -= 1
  980. buf.append(item)
  981. if buf:
  982. yield buf
  983. def split_after(iterable, pred, maxsplit=-1):
  984. """Yield lists of items from *iterable*, where each list ends with an
  985. item where callable *pred* returns ``True``:
  986. >>> list(split_after('one1two2', lambda s: s.isdigit()))
  987. [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
  988. >>> list(split_after(range(10), lambda n: n % 3 == 0))
  989. [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
  990. At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
  991. then there is no limit on the number of splits:
  992. >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
  993. [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
  994. """
  995. if maxsplit == 0:
  996. yield list(iterable)
  997. return
  998. buf = []
  999. it = iter(iterable)
  1000. for item in it:
  1001. buf.append(item)
  1002. if pred(item) and buf:
  1003. yield buf
  1004. if maxsplit == 1:
  1005. yield list(it)
  1006. return
  1007. buf = []
  1008. maxsplit -= 1
  1009. if buf:
  1010. yield buf
  1011. def split_when(iterable, pred, maxsplit=-1):
  1012. """Split *iterable* into pieces based on the output of *pred*.
  1013. *pred* should be a function that takes successive pairs of items and
  1014. returns ``True`` if the iterable should be split in between them.
  1015. For example, to find runs of increasing numbers, split the iterable when
  1016. element ``i`` is larger than element ``i + 1``:
  1017. >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
  1018. [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
  1019. At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
  1020. then there is no limit on the number of splits:
  1021. >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
  1022. ... lambda x, y: x > y, maxsplit=2))
  1023. [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
  1024. """
  1025. if maxsplit == 0:
  1026. yield list(iterable)
  1027. return
  1028. it = iter(iterable)
  1029. try:
  1030. cur_item = next(it)
  1031. except StopIteration:
  1032. return
  1033. buf = [cur_item]
  1034. for next_item in it:
  1035. if pred(cur_item, next_item):
  1036. yield buf
  1037. if maxsplit == 1:
  1038. yield [next_item] + list(it)
  1039. return
  1040. buf = []
  1041. maxsplit -= 1
  1042. buf.append(next_item)
  1043. cur_item = next_item
  1044. yield buf
  1045. def split_into(iterable, sizes):
  1046. """Yield a list of sequential items from *iterable* of length 'n' for each
  1047. integer 'n' in *sizes*.
  1048. >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
  1049. [[1], [2, 3], [4, 5, 6]]
  1050. If the sum of *sizes* is smaller than the length of *iterable*, then the
  1051. remaining items of *iterable* will not be returned.
  1052. >>> list(split_into([1,2,3,4,5,6], [2,3]))
  1053. [[1, 2], [3, 4, 5]]
  1054. If the sum of *sizes* is larger than the length of *iterable*, fewer items
  1055. will be returned in the iteration that overruns *iterable* and further
  1056. lists will be empty:
  1057. >>> list(split_into([1,2,3,4], [1,2,3,4]))
  1058. [[1], [2, 3], [4], []]
  1059. When a ``None`` object is encountered in *sizes*, the returned list will
  1060. contain items up to the end of *iterable* the same way that itertools.slice
  1061. does:
  1062. >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
  1063. [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
  1064. :func:`split_into` can be useful for grouping a series of items where the
  1065. sizes of the groups are not uniform. An example would be where in a row
  1066. from a table, multiple columns represent elements of the same feature
  1067. (e.g. a point represented by x,y,z) but, the format is not the same for
  1068. all columns.
  1069. """
  1070. # convert the iterable argument into an iterator so its contents can
  1071. # be consumed by islice in case it is a generator
  1072. it = iter(iterable)
  1073. for size in sizes:
  1074. if size is None:
  1075. yield list(it)
  1076. return
  1077. else:
  1078. yield list(islice(it, size))
  1079. def padded(iterable, fillvalue=None, n=None, next_multiple=False):
  1080. """Yield the elements from *iterable*, followed by *fillvalue*, such that
  1081. at least *n* items are emitted.
  1082. >>> list(padded([1, 2, 3], '?', 5))
  1083. [1, 2, 3, '?', '?']
  1084. If *next_multiple* is ``True``, *fillvalue* will be emitted until the
  1085. number of items emitted is a multiple of *n*::
  1086. >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
  1087. [1, 2, 3, 4, None, None]
  1088. If *n* is ``None``, *fillvalue* will be emitted indefinitely.
  1089. """
  1090. it = iter(iterable)
  1091. if n is None:
  1092. yield from chain(it, repeat(fillvalue))
  1093. elif n < 1:
  1094. raise ValueError('n must be at least 1')
  1095. else:
  1096. item_count = 0
  1097. for item in it:
  1098. yield item
  1099. item_count += 1
  1100. remaining = (n - item_count) % n if next_multiple else n - item_count
  1101. for _ in range(remaining):
  1102. yield fillvalue
  1103. def repeat_last(iterable, default=None):
  1104. """After the *iterable* is exhausted, keep yielding its last element.
  1105. >>> list(islice(repeat_last(range(3)), 5))
  1106. [0, 1, 2, 2, 2]
  1107. If the iterable is empty, yield *default* forever::
  1108. >>> list(islice(repeat_last(range(0), 42), 5))
  1109. [42, 42, 42, 42, 42]
  1110. """
  1111. item = _marker
  1112. for item in iterable:
  1113. yield item
  1114. final = default if item is _marker else item
  1115. yield from repeat(final)
  1116. def distribute(n, iterable):
  1117. """Distribute the items from *iterable* among *n* smaller iterables.
  1118. >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
  1119. >>> list(group_1)
  1120. [1, 3, 5]
  1121. >>> list(group_2)
  1122. [2, 4, 6]
  1123. If the length of *iterable* is not evenly divisible by *n*, then the
  1124. length of the returned iterables will not be identical:
  1125. >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
  1126. >>> [list(c) for c in children]
  1127. [[1, 4, 7], [2, 5], [3, 6]]
  1128. If the length of *iterable* is smaller than *n*, then the last returned
  1129. iterables will be empty:
  1130. >>> children = distribute(5, [1, 2, 3])
  1131. >>> [list(c) for c in children]
  1132. [[1], [2], [3], [], []]
  1133. This function uses :func:`itertools.tee` and may require significant
  1134. storage. If you need the order items in the smaller iterables to match the
  1135. original iterable, see :func:`divide`.
  1136. """
  1137. if n < 1:
  1138. raise ValueError('n must be at least 1')
  1139. children = tee(iterable, n)
  1140. return [islice(it, index, None, n) for index, it in enumerate(children)]
  1141. def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
  1142. """Yield tuples whose elements are offset from *iterable*.
  1143. The amount by which the `i`-th item in each tuple is offset is given by
  1144. the `i`-th item in *offsets*.
  1145. >>> list(stagger([0, 1, 2, 3]))
  1146. [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
  1147. >>> list(stagger(range(8), offsets=(0, 2, 4)))
  1148. [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
  1149. By default, the sequence will end when the final element of a tuple is the
  1150. last item in the iterable. To continue until the first element of a tuple
  1151. is the last item in the iterable, set *longest* to ``True``::
  1152. >>> list(stagger([0, 1, 2, 3], longest=True))
  1153. [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
  1154. By default, ``None`` will be used to replace offsets beyond the end of the
  1155. sequence. Specify *fillvalue* to use some other value.
  1156. """
  1157. children = tee(iterable, len(offsets))
  1158. return zip_offset(
  1159. *children, offsets=offsets, longest=longest, fillvalue=fillvalue
  1160. )
  1161. class UnequalIterablesError(ValueError):
  1162. def __init__(self, details=None):
  1163. msg = 'Iterables have different lengths'
  1164. if details is not None:
  1165. msg += (': index 0 has length {}; index {} has length {}').format(
  1166. *details
  1167. )
  1168. super().__init__(msg)
  1169. def _zip_equal_generator(iterables):
  1170. for combo in zip_longest(*iterables, fillvalue=_marker):
  1171. for val in combo:
  1172. if val is _marker:
  1173. raise UnequalIterablesError()
  1174. yield combo
  1175. def zip_equal(*iterables):
  1176. """``zip`` the input *iterables* together, but raise
  1177. ``UnequalIterablesError`` if they aren't all the same length.
  1178. >>> it_1 = range(3)
  1179. >>> it_2 = iter('abc')
  1180. >>> list(zip_equal(it_1, it_2))
  1181. [(0, 'a'), (1, 'b'), (2, 'c')]
  1182. >>> it_1 = range(3)
  1183. >>> it_2 = iter('abcd')
  1184. >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
  1185. Traceback (most recent call last):
  1186. ...
  1187. more_itertools.more.UnequalIterablesError: Iterables have different
  1188. lengths
  1189. """
  1190. if hexversion >= 0x30A00A6:
  1191. warnings.warn(
  1192. (
  1193. 'zip_equal will be removed in a future version of '
  1194. 'more-itertools. Use the builtin zip function with '
  1195. 'strict=True instead.'
  1196. ),
  1197. DeprecationWarning,
  1198. )
  1199. # Check whether the iterables are all the same size.
  1200. try:
  1201. first_size = len(iterables[0])
  1202. for i, it in enumerate(iterables[1:], 1):
  1203. size = len(it)
  1204. if size != first_size:
  1205. break
  1206. else:
  1207. # If we didn't break out, we can use the built-in zip.
  1208. return zip(*iterables)
  1209. # If we did break out, there was a mismatch.
  1210. raise UnequalIterablesError(details=(first_size, i, size))
  1211. # If any one of the iterables didn't have a length, start reading
  1212. # them until one runs out.
  1213. except TypeError:
  1214. return _zip_equal_generator(iterables)
  1215. def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
  1216. """``zip`` the input *iterables* together, but offset the `i`-th iterable
  1217. by the `i`-th item in *offsets*.
  1218. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
  1219. [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
  1220. This can be used as a lightweight alternative to SciPy or pandas to analyze
  1221. data sets in which some series have a lead or lag relationship.
  1222. By default, the sequence will end when the shortest iterable is exhausted.
  1223. To continue until the longest iterable is exhausted, set *longest* to
  1224. ``True``.
  1225. >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
  1226. [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
  1227. By default, ``None`` will be used to replace offsets beyond the end of the
  1228. sequence. Specify *fillvalue* to use some other value.
  1229. """
  1230. if len(iterables) != len(offsets):
  1231. raise ValueError("Number of iterables and offsets didn't match")
  1232. staggered = []
  1233. for it, n in zip(iterables, offsets):
  1234. if n < 0:
  1235. staggered.append(chain(repeat(fillvalue, -n), it))
  1236. elif n > 0:
  1237. staggered.append(islice(it, n, None))
  1238. else:
  1239. staggered.append(it)
  1240. if longest:
  1241. return zip_longest(*staggered, fillvalue=fillvalue)
  1242. return zip(*staggered)
  1243. def sort_together(iterables, key_list=(0,), key=None, reverse=False):
  1244. """Return the input iterables sorted together, with *key_list* as the
  1245. priority for sorting. All iterables are trimmed to the length of the
  1246. shortest one.
  1247. This can be used like the sorting function in a spreadsheet. If each
  1248. iterable represents a column of data, the key list determines which
  1249. columns are used for sorting.
  1250. By default, all iterables are sorted using the ``0``-th iterable::
  1251. >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
  1252. >>> sort_together(iterables)
  1253. [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
  1254. Set a different key list to sort according to another iterable.
  1255. Specifying multiple keys dictates how ties are broken::
  1256. >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
  1257. >>> sort_together(iterables, key_list=(1, 2))
  1258. [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
  1259. To sort by a function of the elements of the iterable, pass a *key*
  1260. function. Its arguments are the elements of the iterables corresponding to
  1261. the key list::
  1262. >>> names = ('a', 'b', 'c')
  1263. >>> lengths = (1, 2, 3)
  1264. >>> widths = (5, 2, 1)
  1265. >>> def area(length, width):
  1266. ... return length * width
  1267. >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
  1268. [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
  1269. Set *reverse* to ``True`` to sort in descending order.
  1270. >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
  1271. [(3, 2, 1), ('a', 'b', 'c')]
  1272. """
  1273. if key is None:
  1274. # if there is no key function, the key argument to sorted is an
  1275. # itemgetter
  1276. key_argument = itemgetter(*key_list)
  1277. else:
  1278. # if there is a key function, call it with the items at the offsets
  1279. # specified by the key function as arguments
  1280. key_list = list(key_list)
  1281. if len(key_list) == 1:
  1282. # if key_list contains a single item, pass the item at that offset
  1283. # as the only argument to the key function
  1284. key_offset = key_list[0]
  1285. key_argument = lambda zipped_items: key(zipped_items[key_offset])
  1286. else:
  1287. # if key_list contains multiple items, use itemgetter to return a
  1288. # tuple of items, which we pass as *args to the key function
  1289. get_key_items = itemgetter(*key_list)
  1290. key_argument = lambda zipped_items: key(
  1291. *get_key_items(zipped_items)
  1292. )
  1293. return list(
  1294. zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
  1295. )
  1296. def unzip(iterable):
  1297. """The inverse of :func:`zip`, this function disaggregates the elements
  1298. of the zipped *iterable*.
  1299. The ``i``-th iterable contains the ``i``-th element from each element
  1300. of the zipped iterable. The first element is used to to determine the
  1301. length of the remaining elements.
  1302. >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
  1303. >>> letters, numbers = unzip(iterable)
  1304. >>> list(letters)
  1305. ['a', 'b', 'c', 'd']
  1306. >>> list(numbers)
  1307. [1, 2, 3, 4]
  1308. This is similar to using ``zip(*iterable)``, but it avoids reading
  1309. *iterable* into memory. Note, however, that this function uses
  1310. :func:`itertools.tee` and thus may require significant storage.
  1311. """
  1312. head, iterable = spy(iter(iterable))
  1313. if not head:
  1314. # empty iterable, e.g. zip([], [], [])
  1315. return ()
  1316. # spy returns a one-length iterable as head
  1317. head = head[0]
  1318. iterables = tee(iterable, len(head))
  1319. def itemgetter(i):
  1320. def getter(obj):
  1321. try:
  1322. return obj[i]
  1323. except IndexError:
  1324. # basically if we have an iterable like
  1325. # iter([(1, 2, 3), (4, 5), (6,)])
  1326. # the second unzipped iterable would fail at the third tuple
  1327. # since it would try to access tup[1]
  1328. # same with the third unzipped iterable and the second tuple
  1329. # to support these "improperly zipped" iterables,
  1330. # we create a custom itemgetter
  1331. # which just stops the unzipped iterables
  1332. # at first length mismatch
  1333. raise StopIteration
  1334. return getter
  1335. return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
  1336. def divide(n, iterable):
  1337. """Divide the elements from *iterable* into *n* parts, maintaining
  1338. order.
  1339. >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
  1340. >>> list(group_1)
  1341. [1, 2, 3]
  1342. >>> list(group_2)
  1343. [4, 5, 6]
  1344. If the length of *iterable* is not evenly divisible by *n*, then the
  1345. length of the returned iterables will not be identical:
  1346. >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
  1347. >>> [list(c) for c in children]
  1348. [[1, 2, 3], [4, 5], [6, 7]]
  1349. If the length of the iterable is smaller than n, then the last returned
  1350. iterables will be empty:
  1351. >>> children = divide(5, [1, 2, 3])
  1352. >>> [list(c) for c in children]
  1353. [[1], [2], [3], [], []]
  1354. This function will exhaust the iterable before returning and may require
  1355. significant storage. If order is not important, see :func:`distribute`,
  1356. which does not first pull the iterable into memory.
  1357. """
  1358. if n < 1:
  1359. raise ValueError('n must be at least 1')
  1360. try:
  1361. iterable[:0]
  1362. except TypeError:
  1363. seq = tuple(iterable)
  1364. else:
  1365. seq = iterable
  1366. q, r = divmod(len(seq), n)
  1367. ret = []
  1368. stop = 0
  1369. for i in range(1, n + 1):
  1370. start = stop
  1371. stop += q + 1 if i <= r else q
  1372. ret.append(iter(seq[start:stop]))
  1373. return ret
  1374. def always_iterable(obj, base_type=(str, bytes)):
  1375. """If *obj* is iterable, return an iterator over its items::
  1376. >>> obj = (1, 2, 3)
  1377. >>> list(always_iterable(obj))
  1378. [1, 2, 3]
  1379. If *obj* is not iterable, return a one-item iterable containing *obj*::
  1380. >>> obj = 1
  1381. >>> list(always_iterable(obj))
  1382. [1]
  1383. If *obj* is ``None``, return an empty iterable:
  1384. >>> obj = None
  1385. >>> list(always_iterable(None))
  1386. []
  1387. By default, binary and text strings are not considered iterable::
  1388. >>> obj = 'foo'
  1389. >>> list(always_iterable(obj))
  1390. ['foo']
  1391. If *base_type* is set, objects for which ``isinstance(obj, base_type)``
  1392. returns ``True`` won't be considered iterable.
  1393. >>> obj = {'a': 1}
  1394. >>> list(always_iterable(obj)) # Iterate over the dict's keys
  1395. ['a']
  1396. >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
  1397. [{'a': 1}]
  1398. Set *base_type* to ``None`` to avoid any special handling and treat objects
  1399. Python considers iterable as iterable:
  1400. >>> obj = 'foo'
  1401. >>> list(always_iterable(obj, base_type=None))
  1402. ['f', 'o', 'o']
  1403. """
  1404. if obj is None:
  1405. return iter(())
  1406. if (base_type is not None) and isinstance(obj, base_type):
  1407. return iter((obj,))
  1408. try:
  1409. return iter(obj)
  1410. except TypeError:
  1411. return iter((obj,))
  1412. def adjacent(predicate, iterable, distance=1):
  1413. """Return an iterable over `(bool, item)` tuples where the `item` is
  1414. drawn from *iterable* and the `bool` indicates whether
  1415. that item satisfies the *predicate* or is adjacent to an item that does.
  1416. For example, to find whether items are adjacent to a ``3``::
  1417. >>> list(adjacent(lambda x: x == 3, range(6)))
  1418. [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
  1419. Set *distance* to change what counts as adjacent. For example, to find
  1420. whether items are two places away from a ``3``:
  1421. >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
  1422. [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
  1423. This is useful for contextualizing the results of a search function.
  1424. For example, a code comparison tool might want to identify lines that
  1425. have changed, but also surrounding lines to give the viewer of the diff
  1426. context.
  1427. The predicate function will only be called once for each item in the
  1428. iterable.
  1429. See also :func:`groupby_transform`, which can be used with this function
  1430. to group ranges of items with the same `bool` value.
  1431. """
  1432. # Allow distance=0 mainly for testing that it reproduces results with map()
  1433. if distance < 0:
  1434. raise ValueError('distance must be at least 0')
  1435. i1, i2 = tee(iterable)
  1436. padding = [False] * distance
  1437. selected = chain(padding, map(predicate, i1), padding)
  1438. adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
  1439. return zip(adjacent_to_selected, i2)
  1440. def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
  1441. """An extension of :func:`itertools.groupby` that can apply transformations
  1442. to the grouped data.
  1443. * *keyfunc* is a function computing a key value for each item in *iterable*
  1444. * *valuefunc* is a function that transforms the individual items from
  1445. *iterable* after grouping
  1446. * *reducefunc* is a function that transforms each group of items
  1447. >>> iterable = 'aAAbBBcCC'
  1448. >>> keyfunc = lambda k: k.upper()
  1449. >>> valuefunc = lambda v: v.lower()
  1450. >>> reducefunc = lambda g: ''.join(g)
  1451. >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
  1452. [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
  1453. Each optional argument defaults to an identity function if not specified.
  1454. :func:`groupby_transform` is useful when grouping elements of an iterable
  1455. using a separate iterable as the key. To do this, :func:`zip` the iterables
  1456. and pass a *keyfunc* that extracts the first element and a *valuefunc*
  1457. that extracts the second element::
  1458. >>> from operator import itemgetter
  1459. >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
  1460. >>> values = 'abcdefghi'
  1461. >>> iterable = zip(keys, values)
  1462. >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
  1463. >>> [(k, ''.join(g)) for k, g in grouper]
  1464. [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
  1465. Note that the order of items in the iterable is significant.
  1466. Only adjacent items are grouped together, so if you don't want any
  1467. duplicate groups, you should sort the iterable by the key function.
  1468. """
  1469. ret = groupby(iterable, keyfunc)
  1470. if valuefunc:
  1471. ret = ((k, map(valuefunc, g)) for k, g in ret)
  1472. if reducefunc:
  1473. ret = ((k, reducefunc(g)) for k, g in ret)
  1474. return ret
  1475. class numeric_range(abc.Sequence, abc.Hashable):
  1476. """An extension of the built-in ``range()`` function whose arguments can
  1477. be any orderable numeric type.
  1478. With only *stop* specified, *start* defaults to ``0`` and *step*
  1479. defaults to ``1``. The output items will match the type of *stop*:
  1480. >>> list(numeric_range(3.5))
  1481. [0.0, 1.0, 2.0, 3.0]
  1482. With only *start* and *stop* specified, *step* defaults to ``1``. The
  1483. output items will match the type of *start*:
  1484. >>> from decimal import Decimal
  1485. >>> start = Decimal('2.1')
  1486. >>> stop = Decimal('5.1')
  1487. >>> list(numeric_range(start, stop))
  1488. [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
  1489. With *start*, *stop*, and *step* specified the output items will match
  1490. the type of ``start + step``:
  1491. >>> from fractions import Fraction
  1492. >>> start = Fraction(1, 2) # Start at 1/2
  1493. >>> stop = Fraction(5, 2) # End at 5/2
  1494. >>> step = Fraction(1, 2) # Count by 1/2
  1495. >>> list(numeric_range(start, stop, step))
  1496. [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
  1497. If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
  1498. >>> list(numeric_range(3, -1, -1.0))
  1499. [3.0, 2.0, 1.0, 0.0]
  1500. Be aware of the limitations of floating point numbers; the representation
  1501. of the yielded numbers may be surprising.
  1502. ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
  1503. is a ``datetime.timedelta`` object:
  1504. >>> import datetime
  1505. >>> start = datetime.datetime(2019, 1, 1)
  1506. >>> stop = datetime.datetime(2019, 1, 3)
  1507. >>> step = datetime.timedelta(days=1)
  1508. >>> items = iter(numeric_range(start, stop, step))
  1509. >>> next(items)
  1510. datetime.datetime(2019, 1, 1, 0, 0)
  1511. >>> next(items)
  1512. datetime.datetime(2019, 1, 2, 0, 0)
  1513. """
  1514. _EMPTY_HASH = hash(range(0, 0))
  1515. def __init__(self, *args):
  1516. argc = len(args)
  1517. if argc == 1:
  1518. (self._stop,) = args
  1519. self._start = type(self._stop)(0)
  1520. self._step = type(self._stop - self._start)(1)
  1521. elif argc == 2:
  1522. self._start, self._stop = args
  1523. self._step = type(self._stop - self._start)(1)
  1524. elif argc == 3:
  1525. self._start, self._stop, self._step = args
  1526. elif argc == 0:
  1527. raise TypeError(
  1528. 'numeric_range expected at least '
  1529. '1 argument, got {}'.format(argc)
  1530. )
  1531. else:
  1532. raise TypeError(
  1533. 'numeric_range expected at most '
  1534. '3 arguments, got {}'.format(argc)
  1535. )
  1536. self._zero = type(self._step)(0)
  1537. if self._step == self._zero:
  1538. raise ValueError('numeric_range() arg 3 must not be zero')
  1539. self._growing = self._step > self._zero
  1540. self._init_len()
  1541. def __bool__(self):
  1542. if self._growing:
  1543. return self._start < self._stop
  1544. else:
  1545. return self._start > self._stop
  1546. def __contains__(self, elem):
  1547. if self._growing:
  1548. if self._start <= elem < self._stop:
  1549. return (elem - self._start) % self._step == self._zero
  1550. else:
  1551. if self._start >= elem > self._stop:
  1552. return (self._start - elem) % (-self._step) == self._zero
  1553. return False
  1554. def __eq__(self, other):
  1555. if isinstance(other, numeric_range):
  1556. empty_self = not bool(self)
  1557. empty_other = not bool(other)
  1558. if empty_self or empty_other:
  1559. return empty_self and empty_other # True if both empty
  1560. else:
  1561. return (
  1562. self._start == other._start
  1563. and self._step == other._step
  1564. and self._get_by_index(-1) == other._get_by_index(-1)
  1565. )
  1566. else:
  1567. return False
  1568. def __getitem__(self, key):
  1569. if isinstance(key, int):
  1570. return self._get_by_index(key)
  1571. elif isinstance(key, slice):
  1572. step = self._step if key.step is None else key.step * self._step
  1573. if key.start is None or key.start <= -self._len:
  1574. start = self._start
  1575. elif key.start >= self._len:
  1576. start = self._stop
  1577. else: # -self._len < key.start < self._len
  1578. start = self._get_by_index(key.start)
  1579. if key.stop is None or key.stop >= self._len:
  1580. stop = self._stop
  1581. elif key.stop <= -self._len:
  1582. stop = self._start
  1583. else: # -self._len < key.stop < self._len
  1584. stop = self._get_by_index(key.stop)
  1585. return numeric_range(start, stop, step)
  1586. else:
  1587. raise TypeError(
  1588. 'numeric range indices must be '
  1589. 'integers or slices, not {}'.format(type(key).__name__)
  1590. )
  1591. def __hash__(self):
  1592. if self:
  1593. return hash((self._start, self._get_by_index(-1), self._step))
  1594. else:
  1595. return self._EMPTY_HASH
  1596. def __iter__(self):
  1597. values = (self._start + (n * self._step) for n in count())
  1598. if self._growing:
  1599. return takewhile(partial(gt, self._stop), values)
  1600. else:
  1601. return takewhile(partial(lt, self._stop), values)
  1602. def __len__(self):
  1603. return self._len
  1604. def _init_len(self):
  1605. if self._growing:
  1606. start = self._start
  1607. stop = self._stop
  1608. step = self._step
  1609. else:
  1610. start = self._stop
  1611. stop = self._start
  1612. step = -self._step
  1613. distance = stop - start
  1614. if distance <= self._zero:
  1615. self._len = 0
  1616. else: # distance > 0 and step > 0: regular euclidean division
  1617. q, r = divmod(distance, step)
  1618. self._len = int(q) + int(r != self._zero)
    def __reduce__(self):
        # Pickle support: rebuild from the original constructor arguments.
        return numeric_range, (self._start, self._stop, self._step)
  1621. def __repr__(self):
  1622. if self._step == 1:
  1623. return "numeric_range({}, {})".format(
  1624. repr(self._start), repr(self._stop)
  1625. )
  1626. else:
  1627. return "numeric_range({}, {}, {})".format(
  1628. repr(self._start), repr(self._stop), repr(self._step)
  1629. )
    def __reversed__(self):
        # Reversal swaps direction: begin at the last element and step
        # backward; the stop is one step *before* the original start so
        # that the original start is still included.
        return iter(
            numeric_range(
                self._get_by_index(-1), self._start - self._step, -self._step
            )
        )
  1636. def count(self, value):
  1637. return int(value in self)
    def index(self, value):
        """Return the position of *value* within the range.

        Raises ``ValueError`` if *value* is not an element (including
        values that lie between steps).
        """
        if self._growing:
            if self._start <= value < self._stop:
                q, r = divmod(value - self._start, self._step)
                # Only an exact multiple of the step is a member.
                if r == self._zero:
                    return int(q)
        else:
            if self._start >= value > self._stop:
                q, r = divmod(self._start - value, -self._step)
                if r == self._zero:
                    return int(q)
        raise ValueError("{} is not in numeric range".format(value))
  1650. def _get_by_index(self, i):
  1651. if i < 0:
  1652. i += self._len
  1653. if i < 0 or i >= self._len:
  1654. raise IndexError("numeric range object index out of range")
  1655. return self._start + i * self._step
  1656. def count_cycle(iterable, n=None):
  1657. """Cycle through the items from *iterable* up to *n* times, yielding
  1658. the number of completed cycles along with each item. If *n* is omitted the
  1659. process repeats indefinitely.
  1660. >>> list(count_cycle('AB', 3))
  1661. [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
  1662. """
  1663. iterable = tuple(iterable)
  1664. if not iterable:
  1665. return iter(())
  1666. counter = count() if n is None else range(n)
  1667. return ((i, item) for i in counter for item in iterable)
  1668. def mark_ends(iterable):
  1669. """Yield 3-tuples of the form ``(is_first, is_last, item)``.
  1670. >>> list(mark_ends('ABC'))
  1671. [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
  1672. Use this when looping over an iterable to take special action on its first
  1673. and/or last items:
  1674. >>> iterable = ['Header', 100, 200, 'Footer']
  1675. >>> total = 0
  1676. >>> for is_first, is_last, item in mark_ends(iterable):
  1677. ... if is_first:
  1678. ... continue # Skip the header
  1679. ... if is_last:
  1680. ... continue # Skip the footer
  1681. ... total += item
  1682. >>> print(total)
  1683. 300
  1684. """
  1685. it = iter(iterable)
  1686. try:
  1687. b = next(it)
  1688. except StopIteration:
  1689. return
  1690. try:
  1691. for i in count():
  1692. a = b
  1693. b = next(it)
  1694. yield i == 0, False, a
  1695. except StopIteration:
  1696. yield i == 0, True, a
  1697. def locate(iterable, pred=bool, window_size=None):
  1698. """Yield the index of each item in *iterable* for which *pred* returns
  1699. ``True``.
  1700. *pred* defaults to :func:`bool`, which will select truthy items:
  1701. >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
  1702. [1, 2, 4]
  1703. Set *pred* to a custom function to, e.g., find the indexes for a particular
  1704. item.
  1705. >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
  1706. [1, 3]
  1707. If *window_size* is given, then the *pred* function will be called with
  1708. that many items. This enables searching for sub-sequences:
  1709. >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
  1710. >>> pred = lambda *args: args == (1, 2, 3)
  1711. >>> list(locate(iterable, pred=pred, window_size=3))
  1712. [1, 5, 9]
  1713. Use with :func:`seekable` to find indexes and then retrieve the associated
  1714. items:
  1715. >>> from itertools import count
  1716. >>> from more_itertools import seekable
  1717. >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
  1718. >>> it = seekable(source)
  1719. >>> pred = lambda x: x > 100
  1720. >>> indexes = locate(it, pred=pred)
  1721. >>> i = next(indexes)
  1722. >>> it.seek(i)
  1723. >>> next(it)
  1724. 106
  1725. """
  1726. if window_size is None:
  1727. return compress(count(), map(pred, iterable))
  1728. if window_size < 1:
  1729. raise ValueError('window size must be at least 1')
  1730. it = windowed(iterable, window_size, fillvalue=_marker)
  1731. return compress(count(), starmap(pred, it))
def lstrip(iterable, pred):
    """Yield the items from *iterable*, but strip any from the beginning
    for which *pred* returns ``True``.

    For example, to remove a set of items from the start of an iterable:

    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
    >>> pred = lambda x: x in {None, False, ''}
    >>> list(lstrip(iterable, pred))
    [1, 2, None, 3, False, None]

    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.
    """
    return dropwhile(pred, iterable)
  1744. def rstrip(iterable, pred):
  1745. """Yield the items from *iterable*, but strip any from the end
  1746. for which *pred* returns ``True``.
  1747. For example, to remove a set of items from the end of an iterable:
  1748. >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
  1749. >>> pred = lambda x: x in {None, False, ''}
  1750. >>> list(rstrip(iterable, pred))
  1751. [None, False, None, 1, 2, None, 3]
  1752. This function is analogous to :func:`str.rstrip`.
  1753. """
  1754. cache = []
  1755. cache_append = cache.append
  1756. cache_clear = cache.clear
  1757. for x in iterable:
  1758. if pred(x):
  1759. cache_append(x)
  1760. else:
  1761. yield from cache
  1762. cache_clear()
  1763. yield x
  1764. def strip(iterable, pred):
  1765. """Yield the items from *iterable*, but strip any from the
  1766. beginning and end for which *pred* returns ``True``.
  1767. For example, to remove a set of items from both ends of an iterable:
  1768. >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
  1769. >>> pred = lambda x: x in {None, False, ''}
  1770. >>> list(strip(iterable, pred))
  1771. [1, 2, None, 3]
  1772. This function is analogous to :func:`str.strip`.
  1773. """
  1774. return rstrip(lstrip(iterable, pred), pred)
  1775. class islice_extended:
  1776. """An extension of :func:`itertools.islice` that supports negative values
  1777. for *stop*, *start*, and *step*.
  1778. >>> iterable = iter('abcdefgh')
  1779. >>> list(islice_extended(iterable, -4, -1))
  1780. ['e', 'f', 'g']
  1781. Slices with negative values require some caching of *iterable*, but this
  1782. function takes care to minimize the amount of memory required.
  1783. For example, you can use a negative step with an infinite iterator:
  1784. >>> from itertools import count
  1785. >>> list(islice_extended(count(), 110, 99, -2))
  1786. [110, 108, 106, 104, 102, 100]
  1787. You can also use slice notation directly:
  1788. >>> iterable = map(str, count())
  1789. >>> it = islice_extended(iterable)[10:20:2]
  1790. >>> list(it)
  1791. ['10', '12', '14', '16', '18']
  1792. """
  1793. def __init__(self, iterable, *args):
  1794. it = iter(iterable)
  1795. if args:
  1796. self._iterable = _islice_helper(it, slice(*args))
  1797. else:
  1798. self._iterable = it
  1799. def __iter__(self):
  1800. return self
  1801. def __next__(self):
  1802. return next(self._iterable)
  1803. def __getitem__(self, key):
  1804. if isinstance(key, slice):
  1805. return islice_extended(_islice_helper(self._iterable, key))
  1806. raise TypeError('islice_extended.__getitem__ argument must be a slice')
def _islice_helper(it, s):
    """Yield the items of iterator *it* selected by slice *s*, allowing
    negative start/stop/step.

    Negative endpoints are resolved by buffering just enough of *it* in a
    bounded deque to learn the (eventual) length; positive-only slices
    fall through to plain :func:`itertools.islice`.
    """
    start = s.start
    stop = s.stop
    if s.step == 0:
        raise ValueError('step argument must be a non-zero integer or None.')
    step = s.step or 1
    if step > 0:
        start = 0 if (start is None) else start
        if start < 0:
            # Consume all but the last -start items
            # (enumerate from 1 so the last counter equals the total length)
            cache = deque(enumerate(it, 1), maxlen=-start)
            len_iter = cache[-1][0] if cache else 0
            # Adjust start to be positive
            i = max(len_iter + start, 0)
            # Adjust stop to be positive
            if stop is None:
                j = len_iter
            elif stop >= 0:
                j = min(stop, len_iter)
            else:
                j = max(len_iter + stop, 0)
            # Slice the cache
            n = j - i
            if n <= 0:
                return
            for index, item in islice(cache, 0, n, step):
                yield item
        elif (stop is not None) and (stop < 0):
            # Advance to the start position
            next(islice(it, start, start), None)
            # When stop is negative, we have to carry -stop items while
            # iterating
            cache = deque(islice(it, -stop), maxlen=-stop)
            for index, item in enumerate(it):
                cached_item = cache.popleft()
                if index % step == 0:
                    yield cached_item
                cache.append(item)
        else:
            # When both start and stop are positive we have the normal case
            yield from islice(it, start, stop, step)
    else:
        start = -1 if (start is None) else start
        if (stop is not None) and (stop < 0):
            # Consume all but the last items
            n = -stop - 1
            cache = deque(enumerate(it, 1), maxlen=n)
            len_iter = cache[-1][0] if cache else 0
            # If start and stop are both negative they are comparable and
            # we can just slice. Otherwise we can adjust start to be negative
            # and then slice.
            if start < 0:
                i, j = start, stop
            else:
                i, j = min(start - len_iter, -1), None
            for index, item in list(cache)[i:j:step]:
                yield item
        else:
            # Advance to the stop position
            if stop is not None:
                m = stop + 1
                next(islice(it, m, m), None)
            # stop is positive, so if start is negative they are not comparable
            # and we need the rest of the items.
            if start < 0:
                i = start
                n = None
            # stop is None and start is positive, so we just need items up to
            # the start index.
            elif stop is None:
                i = None
                n = start + 1
            # Both stop and start are positive, so they are comparable.
            else:
                i = None
                n = start - stop
                if n <= 0:
                    return
            cache = list(islice(it, n))
            yield from cache[i::step]
  1887. def always_reversible(iterable):
  1888. """An extension of :func:`reversed` that supports all iterables, not
  1889. just those which implement the ``Reversible`` or ``Sequence`` protocols.
  1890. >>> print(*always_reversible(x for x in range(3)))
  1891. 2 1 0
  1892. If the iterable is already reversible, this function returns the
  1893. result of :func:`reversed()`. If the iterable is not reversible,
  1894. this function will cache the remaining items in the iterable and
  1895. yield them in reverse order, which may require significant storage.
  1896. """
  1897. try:
  1898. return reversed(iterable)
  1899. except TypeError:
  1900. return reversed(list(iterable))
  1901. def consecutive_groups(iterable, ordering=lambda x: x):
  1902. """Yield groups of consecutive items using :func:`itertools.groupby`.
  1903. The *ordering* function determines whether two items are adjacent by
  1904. returning their position.
  1905. By default, the ordering function is the identity function. This is
  1906. suitable for finding runs of numbers:
  1907. >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
  1908. >>> for group in consecutive_groups(iterable):
  1909. ... print(list(group))
  1910. [1]
  1911. [10, 11, 12]
  1912. [20]
  1913. [30, 31, 32, 33]
  1914. [40]
  1915. For finding runs of adjacent letters, try using the :meth:`index` method
  1916. of a string of letters:
  1917. >>> from string import ascii_lowercase
  1918. >>> iterable = 'abcdfgilmnop'
  1919. >>> ordering = ascii_lowercase.index
  1920. >>> for group in consecutive_groups(iterable, ordering):
  1921. ... print(list(group))
  1922. ['a', 'b', 'c', 'd']
  1923. ['f', 'g']
  1924. ['i']
  1925. ['l', 'm', 'n', 'o', 'p']
  1926. Each group of consecutive items is an iterator that shares it source with
  1927. *iterable*. When an an output group is advanced, the previous group is
  1928. no longer available unless its elements are copied (e.g., into a ``list``).
  1929. >>> iterable = [1, 2, 11, 12, 21, 22]
  1930. >>> saved_groups = []
  1931. >>> for group in consecutive_groups(iterable):
  1932. ... saved_groups.append(list(group)) # Copy group elements
  1933. >>> saved_groups
  1934. [[1, 2], [11, 12], [21, 22]]
  1935. """
  1936. for k, g in groupby(
  1937. enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
  1938. ):
  1939. yield map(itemgetter(1), g)
  1940. def difference(iterable, func=sub, *, initial=None):
  1941. """This function is the inverse of :func:`itertools.accumulate`. By default
  1942. it will compute the first difference of *iterable* using
  1943. :func:`operator.sub`:
  1944. >>> from itertools import accumulate
  1945. >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
  1946. >>> list(difference(iterable))
  1947. [0, 1, 2, 3, 4]
  1948. *func* defaults to :func:`operator.sub`, but other functions can be
  1949. specified. They will be applied as follows::
  1950. A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
  1951. For example, to do progressive division:
  1952. >>> iterable = [1, 2, 6, 24, 120]
  1953. >>> func = lambda x, y: x // y
  1954. >>> list(difference(iterable, func))
  1955. [1, 2, 3, 4, 5]
  1956. If the *initial* keyword is set, the first element will be skipped when
  1957. computing successive differences.
  1958. >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
  1959. >>> list(difference(it, initial=10))
  1960. [1, 2, 3]
  1961. """
  1962. a, b = tee(iterable)
  1963. try:
  1964. first = [next(b)]
  1965. except StopIteration:
  1966. return iter([])
  1967. if initial is not None:
  1968. first = []
  1969. return chain(first, starmap(func, zip(b, a)))
  1970. class SequenceView(Sequence):
  1971. """Return a read-only view of the sequence object *target*.
  1972. :class:`SequenceView` objects are analogous to Python's built-in
  1973. "dictionary view" types. They provide a dynamic view of a sequence's items,
  1974. meaning that when the sequence updates, so does the view.
  1975. >>> seq = ['0', '1', '2']
  1976. >>> view = SequenceView(seq)
  1977. >>> view
  1978. SequenceView(['0', '1', '2'])
  1979. >>> seq.append('3')
  1980. >>> view
  1981. SequenceView(['0', '1', '2', '3'])
  1982. Sequence views support indexing, slicing, and length queries. They act
  1983. like the underlying sequence, except they don't allow assignment:
  1984. >>> view[1]
  1985. '1'
  1986. >>> view[1:-1]
  1987. ['1', '2']
  1988. >>> len(view)
  1989. 4
  1990. Sequence views are useful as an alternative to copying, as they don't
  1991. require (much) extra storage.
  1992. """
  1993. def __init__(self, target):
  1994. if not isinstance(target, Sequence):
  1995. raise TypeError
  1996. self._target = target
  1997. def __getitem__(self, index):
  1998. return self._target[index]
  1999. def __len__(self):
  2000. return len(self._target)
  2001. def __repr__(self):
  2002. return '{}({})'.format(self.__class__.__name__, repr(self._target))
  2003. class seekable:
  2004. """Wrap an iterator to allow for seeking backward and forward. This
  2005. progressively caches the items in the source iterable so they can be
  2006. re-visited.
  2007. Call :meth:`seek` with an index to seek to that position in the source
  2008. iterable.
  2009. To "reset" an iterator, seek to ``0``:
  2010. >>> from itertools import count
  2011. >>> it = seekable((str(n) for n in count()))
  2012. >>> next(it), next(it), next(it)
  2013. ('0', '1', '2')
  2014. >>> it.seek(0)
  2015. >>> next(it), next(it), next(it)
  2016. ('0', '1', '2')
  2017. >>> next(it)
  2018. '3'
  2019. You can also seek forward:
  2020. >>> it = seekable((str(n) for n in range(20)))
  2021. >>> it.seek(10)
  2022. >>> next(it)
  2023. '10'
  2024. >>> it.seek(20) # Seeking past the end of the source isn't a problem
  2025. >>> list(it)
  2026. []
  2027. >>> it.seek(0) # Resetting works even after hitting the end
  2028. >>> next(it), next(it), next(it)
  2029. ('0', '1', '2')
  2030. Call :meth:`peek` to look ahead one item without advancing the iterator:
  2031. >>> it = seekable('1234')
  2032. >>> it.peek()
  2033. '1'
  2034. >>> list(it)
  2035. ['1', '2', '3', '4']
  2036. >>> it.peek(default='empty')
  2037. 'empty'
  2038. Before the iterator is at its end, calling :func:`bool` on it will return
  2039. ``True``. After it will return ``False``:
  2040. >>> it = seekable('5678')
  2041. >>> bool(it)
  2042. True
  2043. >>> list(it)
  2044. ['5', '6', '7', '8']
  2045. >>> bool(it)
  2046. False
  2047. You may view the contents of the cache with the :meth:`elements` method.
  2048. That returns a :class:`SequenceView`, a view that updates automatically:
  2049. >>> it = seekable((str(n) for n in range(10)))
  2050. >>> next(it), next(it), next(it)
  2051. ('0', '1', '2')
  2052. >>> elements = it.elements()
  2053. >>> elements
  2054. SequenceView(['0', '1', '2'])
  2055. >>> next(it)
  2056. '3'
  2057. >>> elements
  2058. SequenceView(['0', '1', '2', '3'])
  2059. By default, the cache grows as the source iterable progresses, so beware of
  2060. wrapping very large or infinite iterables. Supply *maxlen* to limit the
  2061. size of the cache (this of course limits how far back you can seek).
  2062. >>> from itertools import count
  2063. >>> it = seekable((str(n) for n in count()), maxlen=2)
  2064. >>> next(it), next(it), next(it), next(it)
  2065. ('0', '1', '2', '3')
  2066. >>> list(it.elements())
  2067. ['2', '3']
  2068. >>> it.seek(0)
  2069. >>> next(it), next(it), next(it), next(it)
  2070. ('2', '3', '4', '5')
  2071. >>> next(it)
  2072. '6'
  2073. """
  2074. def __init__(self, iterable, maxlen=None):
  2075. self._source = iter(iterable)
  2076. if maxlen is None:
  2077. self._cache = []
  2078. else:
  2079. self._cache = deque([], maxlen)
  2080. self._index = None
  2081. def __iter__(self):
  2082. return self
  2083. def __next__(self):
  2084. if self._index is not None:
  2085. try:
  2086. item = self._cache[self._index]
  2087. except IndexError:
  2088. self._index = None
  2089. else:
  2090. self._index += 1
  2091. return item
  2092. item = next(self._source)
  2093. self._cache.append(item)
  2094. return item
  2095. def __bool__(self):
  2096. try:
  2097. self.peek()
  2098. except StopIteration:
  2099. return False
  2100. return True
  2101. def peek(self, default=_marker):
  2102. try:
  2103. peeked = next(self)
  2104. except StopIteration:
  2105. if default is _marker:
  2106. raise
  2107. return default
  2108. if self._index is None:
  2109. self._index = len(self._cache)
  2110. self._index -= 1
  2111. return peeked
  2112. def elements(self):
  2113. return SequenceView(self._cache)
  2114. def seek(self, index):
  2115. self._index = index
  2116. remainder = index - len(self._cache)
  2117. if remainder > 0:
  2118. consume(self, remainder)
  2119. class run_length:
  2120. """
  2121. :func:`run_length.encode` compresses an iterable with run-length encoding.
  2122. It yields groups of repeated items with the count of how many times they
  2123. were repeated:
  2124. >>> uncompressed = 'abbcccdddd'
  2125. >>> list(run_length.encode(uncompressed))
  2126. [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
  2127. :func:`run_length.decode` decompresses an iterable that was previously
  2128. compressed with run-length encoding. It yields the items of the
  2129. decompressed iterable:
  2130. >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
  2131. >>> list(run_length.decode(compressed))
  2132. ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
  2133. """
  2134. @staticmethod
  2135. def encode(iterable):
  2136. return ((k, ilen(g)) for k, g in groupby(iterable))
  2137. @staticmethod
  2138. def decode(iterable):
  2139. return chain.from_iterable(repeat(k, n) for k, n in iterable)
  2140. def exactly_n(iterable, n, predicate=bool):
  2141. """Return ``True`` if exactly ``n`` items in the iterable are ``True``
  2142. according to the *predicate* function.
  2143. >>> exactly_n([True, True, False], 2)
  2144. True
  2145. >>> exactly_n([True, True, False], 1)
  2146. False
  2147. >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
  2148. True
  2149. The iterable will be advanced until ``n + 1`` truthy items are encountered,
  2150. so avoid calling it on infinite iterables.
  2151. """
  2152. return len(take(n + 1, filter(predicate, iterable))) == n
  2153. def circular_shifts(iterable):
  2154. """Return a list of circular shifts of *iterable*.
  2155. >>> circular_shifts(range(4))
  2156. [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
  2157. """
  2158. lst = list(iterable)
  2159. return take(len(lst), windowed(cycle(lst), len(lst)))
  2160. def make_decorator(wrapping_func, result_index=0):
  2161. """Return a decorator version of *wrapping_func*, which is a function that
  2162. modifies an iterable. *result_index* is the position in that function's
  2163. signature where the iterable goes.
  2164. This lets you use itertools on the "production end," i.e. at function
  2165. definition. This can augment what the function returns without changing the
  2166. function's code.
  2167. For example, to produce a decorator version of :func:`chunked`:
  2168. >>> from more_itertools import chunked
  2169. >>> chunker = make_decorator(chunked, result_index=0)
  2170. >>> @chunker(3)
  2171. ... def iter_range(n):
  2172. ... return iter(range(n))
  2173. ...
  2174. >>> list(iter_range(9))
  2175. [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
  2176. To only allow truthy items to be returned:
  2177. >>> truth_serum = make_decorator(filter, result_index=1)
  2178. >>> @truth_serum(bool)
  2179. ... def boolean_test():
  2180. ... return [0, 1, '', ' ', False, True]
  2181. ...
  2182. >>> list(boolean_test())
  2183. [1, ' ', True]
  2184. The :func:`peekable` and :func:`seekable` wrappers make for practical
  2185. decorators:
  2186. >>> from more_itertools import peekable
  2187. >>> peekable_function = make_decorator(peekable)
  2188. >>> @peekable_function()
  2189. ... def str_range(*args):
  2190. ... return (str(x) for x in range(*args))
  2191. ...
  2192. >>> it = str_range(1, 20, 2)
  2193. >>> next(it), next(it), next(it)
  2194. ('1', '3', '5')
  2195. >>> it.peek()
  2196. '7'
  2197. >>> next(it)
  2198. '7'
  2199. """
  2200. # See https://sites.google.com/site/bbayles/index/decorator_factory for
  2201. # notes on how this works.
  2202. def decorator(*wrapping_args, **wrapping_kwargs):
  2203. def outer_wrapper(f):
  2204. def inner_wrapper(*args, **kwargs):
  2205. result = f(*args, **kwargs)
  2206. wrapping_args_ = list(wrapping_args)
  2207. wrapping_args_.insert(result_index, result)
  2208. return wrapping_func(*wrapping_args_, **wrapping_kwargs)
  2209. return inner_wrapper
  2210. return outer_wrapper
  2211. return decorator
  2212. def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
  2213. """Return a dictionary that maps the items in *iterable* to categories
  2214. defined by *keyfunc*, transforms them with *valuefunc*, and
  2215. then summarizes them by category with *reducefunc*.
  2216. *valuefunc* defaults to the identity function if it is unspecified.
  2217. If *reducefunc* is unspecified, no summarization takes place:
  2218. >>> keyfunc = lambda x: x.upper()
  2219. >>> result = map_reduce('abbccc', keyfunc)
  2220. >>> sorted(result.items())
  2221. [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
  2222. Specifying *valuefunc* transforms the categorized items:
  2223. >>> keyfunc = lambda x: x.upper()
  2224. >>> valuefunc = lambda x: 1
  2225. >>> result = map_reduce('abbccc', keyfunc, valuefunc)
  2226. >>> sorted(result.items())
  2227. [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
  2228. Specifying *reducefunc* summarizes the categorized items:
  2229. >>> keyfunc = lambda x: x.upper()
  2230. >>> valuefunc = lambda x: 1
  2231. >>> reducefunc = sum
  2232. >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
  2233. >>> sorted(result.items())
  2234. [('A', 1), ('B', 2), ('C', 3)]
  2235. You may want to filter the input iterable before applying the map/reduce
  2236. procedure:
  2237. >>> all_items = range(30)
  2238. >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
  2239. >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
  2240. >>> categories = map_reduce(items, keyfunc=keyfunc)
  2241. >>> sorted(categories.items())
  2242. [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
  2243. >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
  2244. >>> sorted(summaries.items())
  2245. [(0, 90), (1, 75)]
  2246. Note that all items in the iterable are gathered into a list before the
  2247. summarization step, which may require significant storage.
  2248. The returned object is a :obj:`collections.defaultdict` with the
  2249. ``default_factory`` set to ``None``, such that it behaves like a normal
  2250. dictionary.
  2251. """
  2252. valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
  2253. ret = defaultdict(list)
  2254. for item in iterable:
  2255. key = keyfunc(item)
  2256. value = valuefunc(item)
  2257. ret[key].append(value)
  2258. if reducefunc is not None:
  2259. for key, value_list in ret.items():
  2260. ret[key] = reducefunc(value_list)
  2261. ret.default_factory = None
  2262. return ret
  2263. def rlocate(iterable, pred=bool, window_size=None):
  2264. """Yield the index of each item in *iterable* for which *pred* returns
  2265. ``True``, starting from the right and moving left.
  2266. *pred* defaults to :func:`bool`, which will select truthy items:
  2267. >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
  2268. [4, 2, 1]
  2269. Set *pred* to a custom function to, e.g., find the indexes for a particular
  2270. item:
  2271. >>> iterable = iter('abcb')
  2272. >>> pred = lambda x: x == 'b'
  2273. >>> list(rlocate(iterable, pred))
  2274. [3, 1]
  2275. If *window_size* is given, then the *pred* function will be called with
  2276. that many items. This enables searching for sub-sequences:
  2277. >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
  2278. >>> pred = lambda *args: args == (1, 2, 3)
  2279. >>> list(rlocate(iterable, pred=pred, window_size=3))
  2280. [9, 5, 1]
  2281. Beware, this function won't return anything for infinite iterables.
  2282. If *iterable* is reversible, ``rlocate`` will reverse it and search from
  2283. the right. Otherwise, it will search from the left and return the results
  2284. in reverse order.
  2285. See :func:`locate` to for other example applications.
  2286. """
  2287. if window_size is None:
  2288. try:
  2289. len_iter = len(iterable)
  2290. return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
  2291. except TypeError:
  2292. pass
  2293. return reversed(list(locate(iterable, pred, window_size)))
def replace(iterable, pred, substitutes, count=None, window_size=1):
    """Yield the items from *iterable*, replacing the items (or groups
    of *window_size* items) for which *pred* returns ``True`` with the
    items from the iterable *substitutes*.

    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
    >>> substitutes = (2, 3)
    >>> list(replace(iterable, lambda x: x == 0, substitutes))
    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]

    If *count* is given, at most that many replacements are performed.
    *window_size* controls how many consecutive items are passed as
    arguments to *pred*, which allows replacing whole sub-sequences.
    """
    if window_size < 1:
        raise ValueError('window_size must be at least 1')
    # Save the substitutes iterable, since it's used more than once
    substitutes = tuple(substitutes)
    # Add padding such that the number of windows matches the length of the
    # iterable
    it = chain(iterable, [_marker] * (window_size - 1))
    windows = windowed(it, window_size)
    n = 0
    for w in windows:
        # If the current window matches our predicate (and we haven't hit
        # our maximum number of replacements), splice in the substitutes
        # and then consume the following windows that overlap with this one.
        # For example, if the iterable is (0, 1, 2, 3, 4...)
        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
        if pred(*w):
            if (count is None) or (n < count):
                n += 1
                yield from substitutes
                consume(windows, window_size - 1)
                continue
        # If there was no match (or we've reached the replacement limit),
        # yield the first item from the window.
        if w and (w[0] is not _marker):
            yield w[0]
  2343. def partitions(iterable):
  2344. """Yield all possible order-preserving partitions of *iterable*.
  2345. >>> iterable = 'abc'
  2346. >>> for part in partitions(iterable):
  2347. ... print([''.join(p) for p in part])
  2348. ['abc']
  2349. ['a', 'bc']
  2350. ['ab', 'c']
  2351. ['a', 'b', 'c']
  2352. This is unrelated to :func:`partition`.
  2353. """
  2354. sequence = list(iterable)
  2355. n = len(sequence)
  2356. for i in powerset(range(1, n)):
  2357. yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
  2358. def set_partitions(iterable, k=None):
  2359. """
  2360. Yield the set partitions of *iterable* into *k* parts. Set partitions are
  2361. not order-preserving.
  2362. >>> iterable = 'abc'
  2363. >>> for part in set_partitions(iterable, 2):
  2364. ... print([''.join(p) for p in part])
  2365. ['a', 'bc']
  2366. ['ab', 'c']
  2367. ['b', 'ac']
  2368. If *k* is not given, every set partition is generated.
  2369. >>> iterable = 'abc'
  2370. >>> for part in set_partitions(iterable):
  2371. ... print([''.join(p) for p in part])
  2372. ['abc']
  2373. ['a', 'bc']
  2374. ['ab', 'c']
  2375. ['b', 'ac']
  2376. ['a', 'b', 'c']
  2377. """
  2378. L = list(iterable)
  2379. n = len(L)
  2380. if k is not None:
  2381. if k < 1:
  2382. raise ValueError(
  2383. "Can't partition in a negative or zero number of groups"
  2384. )
  2385. elif k > n:
  2386. return
  2387. def set_partitions_helper(L, k):
  2388. n = len(L)
  2389. if k == 1:
  2390. yield [L]
  2391. elif n == k:
  2392. yield [[s] for s in L]
  2393. else:
  2394. e, *M = L
  2395. for p in set_partitions_helper(M, k - 1):
  2396. yield [[e], *p]
  2397. for p in set_partitions_helper(M, k):
  2398. for i in range(len(p)):
  2399. yield p[:i] + [[e] + p[i]] + p[i + 1 :]
  2400. if k is None:
  2401. for k in range(1, n + 1):
  2402. yield from set_partitions_helper(L, k)
  2403. else:
  2404. yield from set_partitions_helper(L, k)
  2405. class time_limited:
  2406. """
  2407. Yield items from *iterable* until *limit_seconds* have passed.
  2408. If the time limit expires before all items have been yielded, the
  2409. ``timed_out`` parameter will be set to ``True``.
  2410. >>> from time import sleep
  2411. >>> def generator():
  2412. ... yield 1
  2413. ... yield 2
  2414. ... sleep(0.2)
  2415. ... yield 3
  2416. >>> iterable = time_limited(0.1, generator())
  2417. >>> list(iterable)
  2418. [1, 2]
  2419. >>> iterable.timed_out
  2420. True
  2421. Note that the time is checked before each item is yielded, and iteration
  2422. stops if the time elapsed is greater than *limit_seconds*. If your time
  2423. limit is 1 second, but it takes 2 seconds to generate the first item from
  2424. the iterable, the function will run for 2 seconds and not yield anything.
  2425. """
  2426. def __init__(self, limit_seconds, iterable):
  2427. if limit_seconds < 0:
  2428. raise ValueError('limit_seconds must be positive')
  2429. self.limit_seconds = limit_seconds
  2430. self._iterable = iter(iterable)
  2431. self._start_time = monotonic()
  2432. self.timed_out = False
  2433. def __iter__(self):
  2434. return self
  2435. def __next__(self):
  2436. item = next(self._iterable)
  2437. if monotonic() - self._start_time > self.limit_seconds:
  2438. self.timed_out = True
  2439. raise StopIteration
  2440. return item
  2441. def only(iterable, default=None, too_long=None):
  2442. """If *iterable* has only one item, return it.
  2443. If it has zero items, return *default*.
  2444. If it has more than one item, raise the exception given by *too_long*,
  2445. which is ``ValueError`` by default.
  2446. >>> only([], default='missing')
  2447. 'missing'
  2448. >>> only([1])
  2449. 1
  2450. >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
  2451. Traceback (most recent call last):
  2452. ...
  2453. ValueError: Expected exactly one item in iterable, but got 1, 2,
  2454. and perhaps more.'
  2455. >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
  2456. Traceback (most recent call last):
  2457. ...
  2458. TypeError
  2459. Note that :func:`only` attempts to advance *iterable* twice to ensure there
  2460. is only one item. See :func:`spy` or :func:`peekable` to check
  2461. iterable contents less destructively.
  2462. """
  2463. it = iter(iterable)
  2464. first_value = next(it, default)
  2465. try:
  2466. second_value = next(it)
  2467. except StopIteration:
  2468. pass
  2469. else:
  2470. msg = (
  2471. 'Expected exactly one item in iterable, but got {!r}, {!r}, '
  2472. 'and perhaps more.'.format(first_value, second_value)
  2473. )
  2474. raise too_long or ValueError(msg)
  2475. return first_value
  2476. def ichunked(iterable, n):
  2477. """Break *iterable* into sub-iterables with *n* elements each.
  2478. :func:`ichunked` is like :func:`chunked`, but it yields iterables
  2479. instead of lists.
  2480. If the sub-iterables are read in order, the elements of *iterable*
  2481. won't be stored in memory.
  2482. If they are read out of order, :func:`itertools.tee` is used to cache
  2483. elements as necessary.
  2484. >>> from itertools import count
  2485. >>> all_chunks = ichunked(count(), 4)
  2486. >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
  2487. >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
  2488. [4, 5, 6, 7]
  2489. >>> list(c_1)
  2490. [0, 1, 2, 3]
  2491. >>> list(c_3)
  2492. [8, 9, 10, 11]
  2493. """
  2494. source = iter(iterable)
  2495. while True:
  2496. # Check to see whether we're at the end of the source iterable
  2497. item = next(source, _marker)
  2498. if item is _marker:
  2499. return
  2500. # Clone the source and yield an n-length slice
  2501. source, it = tee(chain([item], source))
  2502. yield islice(it, n)
  2503. # Advance the source iterable
  2504. consume(source, n)
def distinct_combinations(iterable, r):
    """Yield the distinct combinations of *r* items taken from *iterable*.

    >>> list(distinct_combinations([0, 0, 1], 2))
    [(0, 0), (0, 1)]

    Equivalent to ``set(combinations(iterable))``, except duplicates are not
    generated and thrown away. For larger input sequences this is much more
    efficient.

    Raises ``ValueError`` if *r* is negative.
    """
    if r < 0:
        raise ValueError('r must be non-negative')
    elif r == 0:
        # The only 0-length combination is the empty tuple.
        yield ()
        return
    pool = tuple(iterable)
    # Iterative depth-first search over combination prefixes.  Each stack
    # entry is a generator of (index, value) pairs with duplicate values
    # filtered out; the top of the stack enumerates candidates for position
    # `level` of the combination being built.
    generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
    current_combo = [None] * r
    level = 0
    while generators:
        try:
            cur_idx, p = next(generators[-1])
        except StopIteration:
            # No more candidates at this position; backtrack one level.
            generators.pop()
            level -= 1
            continue
        current_combo[level] = p
        if level + 1 == r:
            # Prefix is complete; emit a copy as a tuple.
            yield tuple(current_combo)
        else:
            # Candidates for the next position come strictly after cur_idx
            # in the pool, again with duplicate values skipped.
            generators.append(
                unique_everseen(
                    enumerate(pool[cur_idx + 1 :], cur_idx + 1),
                    key=itemgetter(1),
                )
            )
            level += 1
  2540. def filter_except(validator, iterable, *exceptions):
  2541. """Yield the items from *iterable* for which the *validator* function does
  2542. not raise one of the specified *exceptions*.
  2543. *validator* is called for each item in *iterable*.
  2544. It should be a function that accepts one argument and raises an exception
  2545. if that item is not valid.
  2546. >>> iterable = ['1', '2', 'three', '4', None]
  2547. >>> list(filter_except(int, iterable, ValueError, TypeError))
  2548. ['1', '2', '4']
  2549. If an exception other than one given by *exceptions* is raised by
  2550. *validator*, it is raised like normal.
  2551. """
  2552. for item in iterable:
  2553. try:
  2554. validator(item)
  2555. except exceptions:
  2556. pass
  2557. else:
  2558. yield item
  2559. def map_except(function, iterable, *exceptions):
  2560. """Transform each item from *iterable* with *function* and yield the
  2561. result, unless *function* raises one of the specified *exceptions*.
  2562. *function* is called to transform each item in *iterable*.
  2563. It should be a accept one argument.
  2564. >>> iterable = ['1', '2', 'three', '4', None]
  2565. >>> list(map_except(int, iterable, ValueError, TypeError))
  2566. [1, 2, 4]
  2567. If an exception other than one given by *exceptions* is raised by
  2568. *function*, it is raised like normal.
  2569. """
  2570. for item in iterable:
  2571. try:
  2572. yield function(item)
  2573. except exceptions:
  2574. pass
def _sample_unweighted(iterable, k):
    # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
    # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
    # Returns a list of up to k items sampled uniformly from the iterable.

    # Fill up the reservoir (collection of samples) with the first `k` samples
    # (fewer if the iterable is shorter than k).
    reservoir = take(k, iterable)

    # Generate random number that's the largest in a sample of k U(0,1) numbers
    # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
    W = exp(log(random()) / k)

    # The number of elements to skip before changing the reservoir is a random
    # number with a geometric distribution. Sample it using random() and logs.
    next_index = k + floor(log(random()) / log(1 - W))

    # `take` already consumed the first k items, so enumeration starts at k.
    for index, element in enumerate(iterable, k):
        if index == next_index:
            # Replace a uniformly random reservoir slot with this element.
            reservoir[randrange(k)] = element
            # The new W is the largest in a sample of k U(0, `old_W`) numbers
            W *= exp(log(random()) / k)
            next_index += floor(log(random()) / log(1 - W)) + 1

    return reservoir
def _sample_weighted(iterable, k, weights):
    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
    # "Weighted random sampling with a reservoir".
    # Returns up to k items; heavier weights are more likely to be kept.

    # Log-transform for numerical stability for weights that are small/large
    weight_keys = (log(random()) / weight for weight in weights)

    # Fill up the reservoir (collection of samples) with the first `k`
    # weight-keys and elements, then heapify the list.  The heap root holds
    # the smallest (worst) weight-key currently in the reservoir.
    reservoir = take(k, zip(weight_keys, iterable))
    heapify(reservoir)

    # The number of jumps before changing the reservoir is a random variable
    # with an exponential distribution. Sample it using random() and logs.
    smallest_weight_key, _ = reservoir[0]
    weights_to_skip = log(random()) / smallest_weight_key

    for weight, element in zip(weights, iterable):
        if weight >= weights_to_skip:
            # This element displaces the current worst reservoir entry.
            # The notation here is consistent with the paper, but we store
            # the weight-keys in log-space for better numerical stability.
            smallest_weight_key, _ = reservoir[0]
            t_w = exp(weight * smallest_weight_key)
            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
            weight_key = log(r_2) / weight
            heapreplace(reservoir, (weight_key, element))
            # Re-sample the skip distance against the new worst entry.
            smallest_weight_key, _ = reservoir[0]
            weights_to_skip = log(random()) / smallest_weight_key
        else:
            weights_to_skip -= weight

    # Equivalent to [element for weight_key, element in sorted(reservoir)]
    return [heappop(reservoir)[1] for _ in range(k)]
  2621. def sample(iterable, k, weights=None):
  2622. """Return a *k*-length list of elements chosen (without replacement)
  2623. from the *iterable*. Like :func:`random.sample`, but works on iterables
  2624. of unknown length.
  2625. >>> iterable = range(100)
  2626. >>> sample(iterable, 5) # doctest: +SKIP
  2627. [81, 60, 96, 16, 4]
  2628. An iterable with *weights* may also be given:
  2629. >>> iterable = range(100)
  2630. >>> weights = (i * i + 1 for i in range(100))
  2631. >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP
  2632. [79, 67, 74, 66, 78]
  2633. The algorithm can also be used to generate weighted random permutations.
  2634. The relative weight of each item determines the probability that it
  2635. appears late in the permutation.
  2636. >>> data = "abcdefgh"
  2637. >>> weights = range(1, len(data) + 1)
  2638. >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
  2639. ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
  2640. """
  2641. if k == 0:
  2642. return []
  2643. iterable = iter(iterable)
  2644. if weights is None:
  2645. return _sample_unweighted(iterable, k)
  2646. else:
  2647. weights = iter(weights)
  2648. return _sample_weighted(iterable, k, weights)
  2649. def is_sorted(iterable, key=None, reverse=False):
  2650. """Returns ``True`` if the items of iterable are in sorted order, and
  2651. ``False`` otherwise. *key* and *reverse* have the same meaning that they do
  2652. in the built-in :func:`sorted` function.
  2653. >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
  2654. True
  2655. >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
  2656. False
  2657. The function returns ``False`` after encountering the first out-of-order
  2658. item. If there are no out-of-order items, the iterable is exhausted.
  2659. """
  2660. compare = lt if reverse else gt
  2661. it = iterable if (key is None) else map(key, iterable)
  2662. return not any(starmap(compare, pairwise(it)))
class AbortThread(BaseException):
    """Raised inside a :class:`callback_iter` background thread's callback
    after the controlling ``with`` block has exited, to stop the wrapped
    function. Subclasses ``BaseException`` rather than ``Exception``.
    """

    pass
class callback_iter:
    """Convert a function that uses callbacks to an iterator.

    Let *func* be a function that takes a `callback` keyword argument.
    For example:

    >>> def func(callback=None):
    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
    ...         if callback:
    ...             callback(i, c)
    ...     return 4

    Use ``with callback_iter(func)`` to get an iterator over the parameters
    that are delivered to the callback.

    >>> with callback_iter(func) as it:
    ...     for args, kwargs in it:
    ...         print(args)
    (1, 'a')
    (2, 'b')
    (3, 'c')

    The function will be called in a background thread. The ``done`` property
    indicates whether it has completed execution.

    >>> it.done
    True

    If it completes successfully, its return value will be available
    in the ``result`` property.

    >>> it.result
    4

    Notes:

    * If the function uses some keyword argument besides ``callback``, supply
      *callback_kwd*.
    * If it finished executing, but raised an exception, accessing the
      ``result`` property will raise the same exception.
    * If it hasn't finished executing, accessing the ``result``
      property from within the ``with`` block will raise ``RuntimeError``.
    * If it hasn't finished executing, accessing the ``result`` property from
      outside the ``with`` block will raise a
      ``more_itertools.AbortThread`` exception.
    * Provide *wait_seconds* to adjust how frequently the it is polled for
      output.
    """

    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
        # :param func: function to run in a background thread; it must accept
        #     a callback via the keyword named by *callback_kwd*.
        # :param wait_seconds: polling interval for the internal queue.
        self._func = func
        self._callback_kwd = callback_kwd
        self._aborted = False
        self._future = None  # set once _reader submits func to the executor
        self._wait_seconds = wait_seconds
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._iterator = self._reader()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Flag the callback to raise AbortThread on its next invocation,
        # then block until the background thread finishes.
        self._aborted = True
        self._executor.shutdown()

    def __iter__(self):
        return self

    def __next__(self):
        return next(self._iterator)

    @property
    def done(self):
        # False until the background call has both been submitted (first
        # next()) and finished running.
        if self._future is None:
            return False
        return self._future.done()

    @property
    def result(self):
        # Raises RuntimeError while func is still running; re-raises func's
        # own exception if it failed.
        if not self.done:
            raise RuntimeError('Function has not yet completed')
        return self._future.result()

    def _reader(self):
        # Generator that drives the background call and yields the
        # (args, kwargs) pairs delivered to the callback.
        q = Queue()

        def callback(*args, **kwargs):
            # Runs in the background thread; AbortThread unwinds func after
            # the controlling `with` block has exited.
            if self._aborted:
                raise AbortThread('canceled by user')
            q.put((args, kwargs))

        self._future = self._executor.submit(
            self._func, **{self._callback_kwd: callback}
        )

        # Poll the queue so we can notice completion between callbacks.
        while True:
            try:
                item = q.get(timeout=self._wait_seconds)
            except Empty:
                pass
            else:
                q.task_done()
                yield item

            if self._future.done():
                break

        # Drain anything delivered between the last get() and completion.
        remaining = []
        while True:
            try:
                item = q.get_nowait()
            except Empty:
                break
            else:
                q.task_done()
                remaining.append(item)
        q.join()
        yield from remaining
  2760. def windowed_complete(iterable, n):
  2761. """
  2762. Yield ``(beginning, middle, end)`` tuples, where:
  2763. * Each ``middle`` has *n* items from *iterable*
  2764. * Each ``beginning`` has the items before the ones in ``middle``
  2765. * Each ``end`` has the items after the ones in ``middle``
  2766. >>> iterable = range(7)
  2767. >>> n = 3
  2768. >>> for beginning, middle, end in windowed_complete(iterable, n):
  2769. ... print(beginning, middle, end)
  2770. () (0, 1, 2) (3, 4, 5, 6)
  2771. (0,) (1, 2, 3) (4, 5, 6)
  2772. (0, 1) (2, 3, 4) (5, 6)
  2773. (0, 1, 2) (3, 4, 5) (6,)
  2774. (0, 1, 2, 3) (4, 5, 6) ()
  2775. Note that *n* must be at least 0 and most equal to the length of
  2776. *iterable*.
  2777. This function will exhaust the iterable and may require significant
  2778. storage.
  2779. """
  2780. if n < 0:
  2781. raise ValueError('n must be >= 0')
  2782. seq = tuple(iterable)
  2783. size = len(seq)
  2784. if n > size:
  2785. raise ValueError('n must be <= len(seq)')
  2786. for i in range(size - n + 1):
  2787. beginning = seq[:i]
  2788. middle = seq[i : i + n]
  2789. end = seq[i + n :]
  2790. yield beginning, middle, end
  2791. def all_unique(iterable, key=None):
  2792. """
  2793. Returns ``True`` if all the elements of *iterable* are unique (no two
  2794. elements are equal).
  2795. >>> all_unique('ABCB')
  2796. False
  2797. If a *key* function is specified, it will be used to make comparisons.
  2798. >>> all_unique('ABCb')
  2799. True
  2800. >>> all_unique('ABCb', str.lower)
  2801. False
  2802. The function returns as soon as the first non-unique element is
  2803. encountered. Iterables with a mix of hashable and unhashable items can
  2804. be used, but the function will be slower for unhashable items.
  2805. """
  2806. seenset = set()
  2807. seenset_add = seenset.add
  2808. seenlist = []
  2809. seenlist_add = seenlist.append
  2810. for element in map(key, iterable) if key else iterable:
  2811. try:
  2812. if element in seenset:
  2813. return False
  2814. seenset_add(element)
  2815. except TypeError:
  2816. if element in seenlist:
  2817. return False
  2818. seenlist_add(element)
  2819. return True
  2820. def nth_product(index, *args):
  2821. """Equivalent to ``list(product(*args))[index]``.
  2822. The products of *args* can be ordered lexicographically.
  2823. :func:`nth_product` computes the product at sort position *index* without
  2824. computing the previous products.
  2825. >>> nth_product(8, range(2), range(2), range(2), range(2))
  2826. (1, 0, 0, 0)
  2827. ``IndexError`` will be raised if the given *index* is invalid.
  2828. """
  2829. pools = list(map(tuple, reversed(args)))
  2830. ns = list(map(len, pools))
  2831. c = reduce(mul, ns)
  2832. if index < 0:
  2833. index += c
  2834. if not 0 <= index < c:
  2835. raise IndexError
  2836. result = []
  2837. for pool, n in zip(pools, ns):
  2838. result.append(pool[index % n])
  2839. index //= n
  2840. return tuple(reversed(result))
def nth_permutation(iterable, r, index):
    """Equivalent to ``list(permutations(iterable, r))[index]``

    The subsequences of *iterable* that are of length *r* where order is
    important can be ordered lexicographically. :func:`nth_permutation`
    computes the subsequence at sort position *index* directly, without
    computing the previous subsequences.

    >>> nth_permutation('ghijk', 2, 5)
    ('h', 'i')

    ``ValueError`` will be raised If *r* is negative or greater than the length
    of *iterable*.
    ``IndexError`` will be raised if the given *index* is invalid.
    """
    pool = list(iterable)
    n = len(pool)
    # c is the total number of r-length permutations of the pool.
    if r is None or r == n:
        r, c = n, factorial(n)
    elif not 0 <= r < n:
        raise ValueError
    else:
        c = factorial(n) // factorial(n - r)
    # Negative indexes count from the end, as with sequences.
    if index < 0:
        index += c
    if not 0 <= index < c:
        raise IndexError
    if c == 0:
        return tuple()
    # Scale the index to a full-length permutation rank, then decompose it
    # into factorial-base digits; digit d is the result position n - d.
    result = [0] * r
    q = index * factorial(n) // c if r < n else index
    for d in range(1, n + 1):
        q, i = divmod(q, d)
        if 0 <= n - d < r:
            result[n - d] = i
        if q == 0:
            break
    # Each stored digit selects (and removes) an element from the pool.
    return tuple(map(pool.pop, result))
  2876. def value_chain(*args):
  2877. """Yield all arguments passed to the function in the same order in which
  2878. they were passed. If an argument itself is iterable then iterate over its
  2879. values.
  2880. >>> list(value_chain(1, 2, 3, [4, 5, 6]))
  2881. [1, 2, 3, 4, 5, 6]
  2882. Binary and text strings are not considered iterable and are emitted
  2883. as-is:
  2884. >>> list(value_chain('12', '34', ['56', '78']))
  2885. ['12', '34', '56', '78']
  2886. Multiple levels of nesting are not flattened.
  2887. """
  2888. for value in args:
  2889. if isinstance(value, (str, bytes)):
  2890. yield value
  2891. continue
  2892. try:
  2893. yield from value
  2894. except TypeError:
  2895. yield value
  2896. def product_index(element, *args):
  2897. """Equivalent to ``list(product(*args)).index(element)``
  2898. The products of *args* can be ordered lexicographically.
  2899. :func:`product_index` computes the first index of *element* without
  2900. computing the previous products.
  2901. >>> product_index([8, 2], range(10), range(5))
  2902. 42
  2903. ``ValueError`` will be raised if the given *element* isn't in the product
  2904. of *args*.
  2905. """
  2906. index = 0
  2907. for x, pool in zip_longest(element, args, fillvalue=_marker):
  2908. if x is _marker or pool is _marker:
  2909. raise ValueError('element is not a product of args')
  2910. pool = tuple(pool)
  2911. index = index * len(pool) + pool.index(x)
  2912. return index
def combination_index(element, iterable):
    """Equivalent to ``list(combinations(iterable, r)).index(element)``

    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`combination_index` computes the index of the
    first *element*, without computing the previous combinations.

    >>> combination_index('adf', 'abcdefg')
    10

    ``ValueError`` will be raised if the given *element* isn't one of the
    combinations of *iterable*.
    """
    element = enumerate(element)
    k, y = next(element, (None, None))
    if k is None:
        # An empty element is the single 0-length combination, at index 0.
        return 0

    # Walk the pool once, recording the pool index of each matched item.
    # After the loop, k is the index of element's last item (so the
    # combination length is k + 1).
    indexes = []
    pool = enumerate(iterable)
    for n, x in pool:
        if x == y:
            indexes.append(n)
            tmp, y = next(element, (None, None))
            if tmp is None:
                break
            else:
                k = tmp
    else:
        # Pool ran out before every item of element was matched.
        raise ValueError('element is not a combination of iterable')

    # Exhaust the pool to learn its last index; n + 1 is the pool size.
    n, _ = last(pool, default=(n, None))

    # Python versions below 3.8 don't have math.comb
    index = 1
    for i, j in enumerate(reversed(indexes), start=1):
        j = n - j
        if i <= j:
            # factorial ratio == comb(j, i)
            index += factorial(j) // (factorial(i) * factorial(j - i))
    return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
  2947. def permutation_index(element, iterable):
  2948. """Equivalent to ``list(permutations(iterable, r)).index(element)```
  2949. The subsequences of *iterable* that are of length *r* where order is
  2950. important can be ordered lexicographically. :func:`permutation_index`
  2951. computes the index of the first *element* directly, without computing
  2952. the previous permutations.
  2953. >>> permutation_index([1, 3, 2], range(5))
  2954. 19
  2955. ``ValueError`` will be raised if the given *element* isn't one of the
  2956. permutations of *iterable*.
  2957. """
  2958. index = 0
  2959. pool = list(iterable)
  2960. for i, x in zip(range(len(pool), -1, -1), element):
  2961. r = pool.index(x)
  2962. index = index * i + r
  2963. del pool[r]
  2964. return index
  2965. class countable:
  2966. """Wrap *iterable* and keep a count of how many items have been consumed.
  2967. The ``items_seen`` attribute starts at ``0`` and increments as the iterable
  2968. is consumed:
  2969. >>> iterable = map(str, range(10))
  2970. >>> it = countable(iterable)
  2971. >>> it.items_seen
  2972. 0
  2973. >>> next(it), next(it)
  2974. ('0', '1')
  2975. >>> list(it)
  2976. ['2', '3', '4', '5', '6', '7', '8', '9']
  2977. >>> it.items_seen
  2978. 10
  2979. """
  2980. def __init__(self, iterable):
  2981. self._it = iter(iterable)
  2982. self.items_seen = 0
  2983. def __iter__(self):
  2984. return self
  2985. def __next__(self):
  2986. item = next(self._it)
  2987. self.items_seen += 1
  2988. return item