Coverage for src/somesy/core/models.py: 94% (324 statements)
1"""Core models for the somesy package."""
3from __future__ import annotations
5import functools
6import json
7import re
8from datetime import date
9from pathlib import Path
10from typing import Any, Dict, List, Optional, Union
12from pydantic import (
13 BaseModel,
14 Field,
15 PrivateAttr,
16 field_validator,
17 model_validator,
18)
19from rich.pretty import pretty_repr
20from typing_extensions import Annotated
22from .core import get_input_content
23from .log import SomesyLogLevel
24from .types import ContributionTypeEnum, Country, HttpUrlStr, LicenseEnum

# --------
# Somesy configuration model


class SomesyBaseModel(BaseModel):
    """Customized pydantic BaseModel for somesy.

    Apart from some general tweaks for better defaults,
    adds a private `_key_order` field, which is used to track the
    preferred order for serialization (usually coming from some existing input).

    It can be set on an instance using the set_key_order method,
    and is preserved by `copy()`.

    NOTE: The custom order is intended for leaf models (no further nested models),
    custom order will not work correctly across nesting layers.
    """

    model_config = dict(
        extra="forbid",
        validate_assignment=True,
        populate_by_name=True,
        str_strip_whitespace=True,
        str_min_length=1,
    )

    # ----
    # Key order magic

    _key_order: List[str] = PrivateAttr([])
    """List of field names (NOT aliases!) in the order they should be written in."""

    @classmethod
    @functools.lru_cache()  # compute once per class
    def _aliases(cls) -> Dict[str, str]:
        """Map back from alias field names to internal field names."""
        return {v.alias or k: k for k, v in cls.model_fields.items()}

    @classmethod
    def make_partial(cls, dct):
        """Construct unvalidated partial model from dict.

        Handles aliases correctly, unlike `construct`.
        """
        un_alias = cls._aliases()
        return cls.model_construct(**{un_alias.get(k) or k: v for k, v in dct.items()})

    def set_key_order(self, keys: List[str]):
        """Setter for custom key order used in serialization."""
        un_alias = self._aliases()
        # make sure we use the _actual_ field names
        self._key_order = list(map(lambda k: un_alias.get(k) or k, keys))

    def model_copy(self, *args, **kwargs):
        """Patched copy method (to preserve custom key order)."""
        ret = super().model_copy(*args, **kwargs)
        ret.set_key_order(list(self._key_order))
        return ret

    @staticmethod
    def _patch_kwargs_defaults(kwargs):
        """Set some default arguments if they are not set by kwargs."""
        for key in ["exclude_defaults", "exclude_none"]:
            if kwargs.get(key, None) is None:
                kwargs[key] = True

    def _reorder_dict(self, dct):
        """Return dict with patched key order (according to `self._key_order`).

        Keys in `dct` not listed in `self._key_order` come after all others.

        Used to patch up `model_dump()` and `model_dump_json()`.
        """
        key_order = self._key_order or []
        existing = set(key_order).intersection(set(dct.keys()))
        key_order = [k for k in key_order if k in existing]
        key_order += list(set(dct.keys()) - set(key_order))
        return {k: dct[k] for k in key_order}

    def model_dump(self, *args, **kwargs):
        """Patched dict method (to preserve custom key order)."""
        self._patch_kwargs_defaults(kwargs)
        by_alias = kwargs.pop("by_alias", False)

        dct = super().model_dump(*args, **kwargs, by_alias=False)
        ret = self._reorder_dict(dct)

        if by_alias:
            ret = {self.model_fields[k].alias or k: v for k, v in ret.items()}
        return ret

    def model_dump_json(self, *args, **kwargs):
        """Patched json method (to preserve custom key order)."""
        self._patch_kwargs_defaults(kwargs)
        by_alias = kwargs.pop("by_alias", False)

        # loop back json through dict to apply custom key order
        dct = json.loads(super().model_dump_json(*args, **kwargs, by_alias=False))
        ret = self._reorder_dict(dct)

        if by_alias:
            ret = {self.model_fields[k].alias or k: v for k, v in ret.items()}
        return json.dumps(ret, ensure_ascii=False)
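
# Illustrative usage sketch (not part of the original module): how the key-order
# machinery above is meant to be used. Assumes the `Person` model defined
# further below in this module.
#
#   p = Person(**{"given-names": "Jane", "family-names": "Doe", "email": "jane@example.org"})
#   p.set_key_order(["email", "given-names", "family-names"])
#   p.model_dump(by_alias=True)
#   # -> {"email": "jane@example.org", "given-names": "Jane", "family-names": "Doe"}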


_SOMESY_TARGETS = [
    "cff",
    "pyproject",
    "package_json",
    "codemeta",
    "julia",
    "fortran",
    "pom_xml",
    "mkdocs",
    "rust",
]


class SomesyConfig(SomesyBaseModel):
    """Pydantic model for somesy tool configuration.

    Note that all fields match CLI options, and CLI options will override the
    values declared in a somesy input file (such as `somesy.toml`).
    """

    @model_validator(mode="before")
    @classmethod
    def at_least_one_target(cls, values):
        """Check that at least one output file is enabled."""
        if all(map(lambda x: values.get(f"no_sync_{x}"), _SOMESY_TARGETS)):
            msg = "No sync target enabled, nothing to do. Probably this is a mistake?"
            raise ValueError(msg)

        return values

    # cli flags
    show_info: Annotated[
        bool,
        Field(
            description="Show basic information messages on run (-v flag).",
        ),
    ] = False
    verbose: Annotated[
        bool, Field(description="Show verbose messages on run (-vv flag).")
    ] = False
    debug: Annotated[
        bool, Field(description="Show debug messages on run (-vvv flag).")
    ] = False

    input_file: Annotated[
        Optional[Path], Field(description="Project metadata input file path.")
    ] = Path("somesy.toml")

    no_sync_pyproject: Annotated[
        bool, Field(description="Do not sync with pyproject.toml.")
    ] = False
    pyproject_file: Annotated[
        Union[Path, List[Path]], Field(description="pyproject.toml file path.")
    ] = Path("pyproject.toml")

    no_sync_package_json: Annotated[
        bool, Field(description="Do not sync with package.json.")
    ] = False
    package_json_file: Annotated[
        Union[Path, List[Path]], Field(description="package.json file path.")
    ] = Path("package.json")

    no_sync_julia: Annotated[
        bool, Field(description="Do not sync with Project.toml.")
    ] = False
    julia_file: Annotated[
        Union[Path, List[Path]], Field(description="Project.toml file path.")
    ] = Path("Project.toml")

    no_sync_fortran: Annotated[
        bool, Field(description="Do not sync with fpm.toml.")
    ] = False
    fortran_file: Annotated[
        Union[Path, List[Path]], Field(description="fpm.toml file path.")
    ] = Path("fpm.toml")

    no_sync_pom_xml: Annotated[bool, Field(description="Do not sync with pom.xml.")] = (
        False
    )
    pom_xml_file: Annotated[
        Union[Path, List[Path]], Field(description="pom.xml file path.")
    ] = Path("pom.xml")

    no_sync_mkdocs: Annotated[
        bool, Field(description="Do not sync with mkdocs.yml.")
    ] = False
    mkdocs_file: Annotated[
        Union[Path, List[Path]], Field(description="mkdocs.yml file path.")
    ] = Path("mkdocs.yml")

    no_sync_rust: Annotated[bool, Field(description="Do not sync with Cargo.toml.")] = (
        False
    )
    rust_file: Annotated[
        Union[Path, List[Path]], Field(description="Cargo.toml file path.")
    ] = Path("Cargo.toml")

    no_sync_cff: Annotated[bool, Field(description="Do not sync with CFF.")] = False
    cff_file: Annotated[
        Union[Path, List[Path]], Field(description="CFF file path.")
    ] = Path("CITATION.cff")

    no_sync_codemeta: Annotated[
        bool, Field(description="Do not sync with codemeta.json.")
    ] = False
    codemeta_file: Annotated[
        Union[Path, List[Path]], Field(description="codemeta.json file path.")
    ] = Path("codemeta.json")
    merge_codemeta: Annotated[
        bool,
        Field(
            description="Merge codemeta.json with an existing codemeta.json file."
        ),
    ] = False

    # property to pass validation for all inputs/outputs
    pass_validation: Annotated[
        Optional[bool],
        Field(description="Pass validation for all output files."),
    ] = False

    # packages (sub-folders) for monorepos with their own somesy config
    packages: Annotated[
        Optional[Union[Path, List[Path]]],
        Field(
            description="Packages (sub-folders) for monorepos with their own somesy config."
        ),
    ] = None

    def log_level(self) -> SomesyLogLevel:
        """Return log level derived from this configuration."""
        return SomesyLogLevel.from_flags(
            info=self.show_info, verbose=self.verbose, debug=self.debug
        )

    def update_log_level(self, log_level: SomesyLogLevel):
        """Update config flags according to passed log level."""
        self.show_info = log_level == SomesyLogLevel.INFO
        self.verbose = log_level == SomesyLogLevel.VERBOSE
        self.debug = log_level == SomesyLogLevel.DEBUG

    def get_input(self) -> SomesyInput:
        """Based on the somesy config, load the complete somesy input."""
        # get metadata+config from specified input file
        somesy_input = SomesyInput.from_input_file(self.input_file)
        # update input with merged config settings (cli overrides config file)
        dct: Dict[str, Any] = {}
        dct.update(somesy_input.config or {})
        dct.update(self.model_dump())
        somesy_input.config = SomesyConfig(**dct)
        return somesy_input

    def resolve_paths(self, base_dir: Path) -> None:
        """Resolve all paths in the config relative to the given base directory.

        Args:
            base_dir: The base directory to resolve paths against.

        """

        def resolve_path(
            paths: Optional[Union[Path, List[Path]]],
        ) -> Optional[Union[Path, List[Path]]]:
            if paths is None:
                return None
            if isinstance(paths, list):
                return [base_dir / p for p in paths]
            return base_dir / paths

        # Resolve all file paths
        resolved_input = resolve_path(self.input_file)
        self.input_file = resolved_input if isinstance(resolved_input, Path) else None
        self.pyproject_file = resolve_path(self.pyproject_file)
        self.package_json_file = resolve_path(self.package_json_file)
        self.julia_file = resolve_path(self.julia_file)
        self.fortran_file = resolve_path(self.fortran_file)
        self.pom_xml_file = resolve_path(self.pom_xml_file)
        self.mkdocs_file = resolve_path(self.mkdocs_file)
        self.rust_file = resolve_path(self.rust_file)
        self.cff_file = resolve_path(self.cff_file)
        self.codemeta_file = resolve_path(self.codemeta_file)
        self.packages = resolve_path(self.packages)
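
# Illustrative usage sketch (not part of the original module; assumes a
# `my_project/somesy.toml` exists relative to the working directory). Shows how
# CLI-style flags map onto the config and how paths are resolved:
#
#   conf = SomesyConfig(verbose=True, no_sync_codemeta=True)
#   conf.log_level()      # expected: SomesyLogLevel.VERBOSE
#   conf.resolve_paths(Path("my_project"))
#   conf.pyproject_file   # -> Path("my_project/pyproject.toml")
#   somesy_input = conf.get_input()  # reads my_project/somesy.toml; CLI flags win on merge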


# --------
# Project metadata model (modified from CITATION.cff)


class ContributorBaseModel(SomesyBaseModel):
    """Base model for Person and Entity models.

    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    email: Annotated[
        Optional[str],
        Field(
            pattern=r"^[\S]+@[\S]+\.[\S]{2,}$",
            description="The person's email address.",
        ),
    ] = None

    alias: Annotated[Optional[str], Field(description="The contributor's alias.")] = (
        None
    )
    address: Annotated[
        Optional[str], Field(description="The contributor's address.")
    ] = None
    city: Annotated[Optional[str], Field(description="The entity's city.")] = None
    country: Annotated[
        Optional[Country], Field(description="The entity's country.")
    ] = None
    fax: Annotated[Optional[str], Field(description="The person's fax number.")] = None
    post_code: Annotated[
        Optional[str], Field(alias="post-code", description="The entity's post-code.")
    ] = None
    region: Annotated[Optional[str], Field(description="The entity's region.")] = None
    tel: Annotated[Optional[str], Field(description="The entity's phone number.")] = (
        None
    )

    # ----
    # somesy-specific extensions
    author: Annotated[
        bool,
        Field(
            description="Indicates whether the entity is an author of the project (i.e. significant contributor)."
        ),
    ] = False
    publication_author: Annotated[
        Optional[bool],
        Field(
            description="Indicates whether the entity is to be listed as an author in academic citations."
        ),
    ] = None
    maintainer: Annotated[
        bool,
        Field(
            description="Indicates whether the entity is a maintainer of the project (i.e. for contact)."
        ),
    ] = False

    # NOTE: CFF 1.3 (once done) might provide ways for refined contributor description. That should be implemented here.
    contribution: Annotated[
        Optional[str],
        Field(description="Summary of how the entity contributed to the project."),
    ] = None
    contribution_types: Annotated[
        Optional[List[ContributionTypeEnum]],
        Field(
            description="Relevant types of contributions (see https://allcontributors.org/docs/de/emoji-key).",
            min_length=1,
        ),
    ] = None
    contribution_begin: Annotated[
        Optional[date], Field(description="Beginning date of the contribution.")
    ] = None
    contribution_end: Annotated[
        Optional[date], Field(description="Ending date of the contribution.")
    ] = None

    @model_validator(mode="before")
    @classmethod
    def author_implies_publication(cls, values):
        """Ensure consistency of author and publication_author."""
        if values.get("author"):
            # NOTE: explicitly check for False (different case from None = missing!)
            if values.get("publication_author") is False:
                msg = "Combining author=true and publication_author=false is invalid!"
                raise ValueError(msg)
            values["publication_author"] = True
        return values

    # helper methods
    @property
    def full_name(self) -> str:
        """Return the name of the contributor."""
        pass

    def to_name_email_string(self) -> str:
        """Convert project metadata person object to poetry string for person format `full name <x@y.z>`."""
        if self.email:
            return f"{self.full_name} <{self.email}>"
        else:
            return self.full_name

    @classmethod
    def from_name_email_string(cls, person: str):
        """Return an instance of this class based on a name/e-mail string like `full name <x@y.z>`.

        If the name is `A B C`, then `A B` will be the given names and `C` will be the family name.
        """
        pass
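
# Illustrative sketch (not part of the original module): effect of the
# `author_implies_publication` validator above, shown via the `Person` subclass.
#
#   Person(**{"given-names": "A", "family-names": "B", "author": True}).publication_author
#   # -> True (author implies publication_author)
#   Person(**{"given-names": "A", "family-names": "B", "author": True, "publication_author": False})
#   # -> raises ValueError (inconsistent combination)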


class Entity(ContributorBaseModel):
    """Metadata about an entity in the context of software project ownership.

    An entity, i.e., an institution, team, research group, company, conference, etc., as opposed to a single natural person.
    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    # NOTE: we rely on the defined aliases for direct CITATION.cff interoperability.

    date_end: Annotated[
        Optional[date],
        Field(
            alias="date-end",
            description="The entity's ending date, e.g., when the entity is a conference.",
        ),
    ] = None
    date_start: Annotated[
        Optional[date],
        Field(
            alias="date-start",
            description="The entity's starting date, e.g., when the entity is a conference.",
        ),
    ] = None
    location: Annotated[
        Optional[str],
        Field(
            description="The entity's location, e.g., when the entity is a conference."
        ),
    ] = None
    name: Annotated[str, Field(description="The entity's name.")]
    website: Annotated[
        Optional[HttpUrlStr], Field(description="The entity's website.")
    ] = None
    rorid: Annotated[
        Optional[HttpUrlStr],
        Field(
            description="The entity's ROR ID url **(not required, but highly suggested)**."
        ),
    ] = None

    # helper methods
    @property
    def full_name(self) -> str:
        """Use same property as Person for code integration."""
        return self.name

    @classmethod
    def from_name_email_string(cls, entity: str) -> Entity:
        """Return an `Entity` based on a name/e-mail string like `name <x@y.z>`."""
        m = re.match(r"\s*([^<]+)<([^>]+)>", entity)
        if m is None:
            return Entity(**{"name": entity})

        name, mail = (
            m.group(1).strip(),
            m.group(2).strip(),
        )
        return Entity(
            **{
                "name": name,
                "email": mail,
            }
        )

    def same_person(self, other: Entity) -> bool:
        """Return whether two Entity metadata records are about the same real-world entity.

        Uses heuristic match based on ROR ID, website, email and name (whichever are provided).
        """
        if not isinstance(other, Entity):
            return False
        if self.rorid is not None and other.rorid is not None:
            if self.rorid == other.rorid:
                return True
        if self.website is not None and other.website is not None:
            if self.website == other.website:
                return True
        if self.email is not None and other.email is not None:
            if self.email == other.email:
                return True
        return self.name == other.name

    def model_dump_json(self, *args, **kwargs):
        """Patched json method (to preserve custom key order); outputs `rorid` as `website` if no website is set."""
        ret = super().model_dump_json(*args, **kwargs)
        # convert ret to dict
        ret = json.loads(ret)
        if self.rorid is not None and "website" not in ret:
            ret["website"] = str(self.rorid)
            ret.pop("rorid")
        # convert ret back to json string
        return json.dumps(ret)
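
# Illustrative sketch (not part of the original module):
#
#   e1 = Entity.from_name_email_string("Some Research Lab <lab@example.org>")
#   (e1.name, e1.email)   # -> ("Some Research Lab", "lab@example.org")
#   e2 = Entity(**{"name": "Some Research Lab"})
#   e1.same_person(e2)    # -> True (no rorid/website/email overlap, so names decide)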


class Person(ContributorBaseModel):
    """Metadata about a person in the context of a software project.

    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    # NOTE: we rely on the defined aliases for direct CITATION.cff interoperability.

    orcid: Annotated[
        Optional[HttpUrlStr],
        Field(
            description="The person's ORCID url **(not required, but highly suggested)**."
        ),
    ] = None
    family_names: Annotated[
        str, Field(alias="family-names", description="The person's family names.")
    ]
    given_names: Annotated[
        str, Field(alias="given-names", description="The person's given names.")
    ]
    name_particle: Annotated[
        Optional[str],
        Field(
            alias="name-particle",
            description="The person's name particle, e.g., a nobiliary particle or a preposition meaning 'of' or 'from'"
            " (for example 'von' in 'Alexander von Humboldt').",
            examples=["von"],
        ),
    ] = None
    name_suffix: Annotated[
        Optional[str],
        Field(
            alias="name-suffix",
            description="The person's name-suffix, e.g. 'Jr.' for Sammy Davis Jr. or 'III' for Frank Edwin Wright III.",
            examples=["Jr.", "III"],
        ),
    ] = None
    affiliation: Annotated[
        Optional[str], Field(description="The person's affiliation.")
    ] = None

    # helper methods

    @field_validator("orcid", mode="before")
    @classmethod
    def orcid_from_string(cls, orcid: str) -> Optional[HttpUrlStr]:
        """Convert an ORCID id string to HttpUrlStr."""
        # orcid regex without https://orcid.org/ prefix
        orcid_regex = r"^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{3}[0-9X]$"
        if orcid is not None and isinstance(orcid, str):
            if re.match(orcid_regex, orcid):
                return f"https://orcid.org/{orcid}"
        return orcid

    @property
    def full_name(self) -> str:
        """Return the full name of the person."""
        names = []

        if self.given_names:
            names.append(self.given_names)

        if self.name_particle:
            names.append(self.name_particle)

        if self.family_names:
            names.append(self.family_names)

        if self.name_suffix:
            names.append(self.name_suffix)

        return " ".join(names) if names else ""

    @classmethod
    def from_name_email_string(cls, person: str) -> Person:
        """Return a `Person` based on a name/e-mail string like `full name <x@y.z>`.

        If the name is `A B C`, then `A B` will be the given names and `C` will be the family name.
        """
        m = re.match(r"\s*([^<]+)<([^>]+)>", person)
        if m is None:
            names = list(map(lambda s: s.strip(), person.split()))
            return Person(
                **{
                    "given-names": " ".join(names[:-1]),
                    "family-names": names[-1],
                }
            )
        names, mail = (
            list(map(lambda s: s.strip(), m.group(1).split())),
            m.group(2).strip(),
        )
        # NOTE: for our purposes, does not matter what are given or family names,
        # we only compare on full_name anyway.
        return Person(
            **{
                "given-names": " ".join(names[:-1]),
                "family-names": names[-1],
                "email": mail,
            }
        )

    def same_person(self, other) -> bool:
        """Return whether two Person metadata records are about the same real person.

        Uses heuristic match based on orcid, email and name (whichever are provided).
        """
        if not isinstance(other, Person):
            return False
        if self.orcid is not None and other.orcid is not None:
            # having orcids is the best case, a real identifier
            # NOTE: converting to str from pydantic-internal Url object for == !
            return str(self.orcid) == str(other.orcid)

        # otherwise, try to match according to mail/name
        # sourcery skip: merge-nested-ifs
        if self.email is not None and other.email is not None:
            if self.email == other.email:
                # an email address belongs to exactly one person
                # => same email -> same person
                return True
            # otherwise, need to check name
            # (a person often has multiple email addresses)

        # no orcids, no/distinct email address
        # -> decide based on full_name (which is always present)
        return self.full_name == other.full_name
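
# Illustrative sketch (not part of the original module):
#
#   p1 = Person.from_name_email_string("Jane Mary Doe <jane@work.org>")
#   (p1.given_names, p1.family_names)   # -> ("Jane Mary", "Doe")
#   p2 = Person(**{"given-names": "Jane M.", "family-names": "Doe", "email": "jane@work.org"})
#   p1.same_person(p2)                  # -> True (no ORCIDs given, same email address)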


class ProjectMetadata(SomesyBaseModel):
    """Pydantic model for Project Metadata Input."""

    model_config = dict(extra="ignore")

    @field_validator("people")
    @classmethod
    def ensure_distinct_people(cls, people):
        """Make sure that no person is listed twice in the same list."""
        for i in range(len(people)):
            for j in range(i + 1, len(people)):
                if people[i].same_person(people[j]):
                    p1 = pretty_repr(json.loads(people[i].model_dump_json()))
                    p2 = pretty_repr(json.loads(people[j].model_dump_json()))
                    msg = f"Same person is listed twice:\n{p1}\n{p2}"
                    raise ValueError(msg)
        return people

    @field_validator("entities")
    @classmethod
    def ensure_distinct_entities(cls, entities):
        """Make sure that no entity is listed twice in the same list."""
        for i in range(len(entities)):
            for j in range(i + 1, len(entities)):
                if entities[i].same_person(entities[j]):
                    e1 = pretty_repr(json.loads(entities[i].model_dump_json()))
                    e2 = pretty_repr(json.loads(entities[j].model_dump_json()))
                    msg = f"Same entity is listed twice:\n{e1}\n{e2}"
                    raise ValueError(msg)
        return entities

    @model_validator(mode="after")
    def at_least_one_author(self) -> ProjectMetadata:
        """Make sure there is at least one author."""
        if not self.people and not self.entities:
            raise ValueError(
                "There has to be at least one person or organization in the input."
            )
        if not any(map(lambda p: p.author, self.people)) and not any(
            map(lambda e: e.author, self.entities)
        ):
            raise ValueError(
                "At least one person or organization must be an author of this project."
            )
        return self

    name: Annotated[str, Field(description="Project name.")]
    description: Annotated[str, Field(description="Project description.")]
    version: Annotated[Optional[str], Field(description="Project version.")] = None
    license: Annotated[LicenseEnum, Field(description="SPDX License string.")]

    homepage: Annotated[
        Optional[HttpUrlStr], Field(description="URL of the project homepage.")
    ] = None
    repository: Annotated[
        Optional[HttpUrlStr],
        Field(description="URL of the project source code repository."),
    ] = None
    documentation: Annotated[
        Optional[HttpUrlStr], Field(description="URL of the project documentation.")
    ] = None

    keywords: Annotated[
        Optional[List[str]],
        Field(min_length=1, description="Keywords that describe the project."),
    ] = None

    people: Annotated[
        Optional[List[Person]],
        Field(
            description="Project authors, maintainers and contributors.",
            default_factory=list,
        ),
    ]

    entities: Annotated[
        Optional[List[Entity]],
        Field(
            description="Project authors, maintainers and contributors as entities (organizations).",
            default_factory=list,
        ),
    ]

    def authors(self):
        """Return people and entities explicitly marked as authors."""
        authors = [p for p in self.people if p.author]
        authors.extend([e for e in self.entities if e.author])
        return authors

    def publication_authors(self):
        """Return people and entities marked as publication authors.

        This always includes those marked as authors.
        """
        # return an empty list if no publication authors are specified
        if not any(map(lambda p: p.publication_author, self.people)) and not any(
            map(lambda p: p.publication_author, self.entities)
        ):
            return []
        publication_authors = [p for p in self.people if p.publication_author]
        publication_authors.extend([e for e in self.entities if e.publication_author])
        return publication_authors

    def maintainers(self):
        """Return people and entities marked as maintainers."""
        maintainers = [p for p in self.people if p.maintainer]
        maintainers.extend([e for e in self.entities if e.maintainer])
        return maintainers

    def contributors(self):
        """Return only people and entities not marked as authors."""
        contributors = [p for p in self.people if not p.author]
        contributors.extend([e for e in self.entities if not e.author])
        return contributors
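
# Illustrative sketch (not part of the original module; assumes "MIT" is a valid
# `LicenseEnum` value): role filters on a minimal project.
#
#   meta = ProjectMetadata(
#       name="demo",
#       description="Demo project",
#       license="MIT",
#       people=[Person(**{"given-names": "Jane", "family-names": "Doe",
#                         "author": True, "maintainer": True})],
#   )
#   [p.full_name for p in meta.authors()]      # -> ["Jane Doe"]
#   [p.full_name for p in meta.maintainers()]  # -> ["Jane Doe"]
#   meta.contributors()                        # -> [] (authors are not listed as contributors)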


class SomesyInput(SomesyBaseModel):
    """The complete somesy input file (`somesy.toml`) or section (`pyproject.toml`)."""

    _origin: Optional[Path]

    project: Annotated[
        ProjectMetadata,
        Field(description="Project metadata to be used and synchronized."),
    ]
    config: Annotated[
        Optional[SomesyConfig],
        Field(
            description="somesy tool configuration (matches CLI flags).",
            default_factory=lambda: SomesyConfig(),
        ),
    ]

    # if config.input_file is set, use it as origin
    @model_validator(mode="after")
    def set_origin(self):
        """Set the origin of the input file."""
        if self.config and self.config.input_file:
            self._origin = self.config.input_file
        return self

    def is_somesy_file(self) -> bool:
        """Return whether this somesy input is from a somesy config file.

        That means, returns False if it is from pyproject.toml or package.json.
        """
        return self.is_somesy_file_path(self._origin or Path("."))

    @classmethod
    def is_somesy_file_path(cls, path: Path) -> bool:
        """Return whether the path looks like a somesy config file.

        That means, returns False if it is e.g. pyproject.toml or package.json.
        """
        return str(path).endswith("somesy.toml")

    @classmethod
    def from_input_file(cls, path: Path) -> SomesyInput:
        """Load somesy input from given file."""
        content = get_input_content(path)
        ret = SomesyInput(**content)
        ret._origin = path
        return ret
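
# Illustrative usage sketch (not part of the original module; assumes a
# `somesy.toml` with project metadata exists in the current directory):
#
#   inp = SomesyInput.from_input_file(Path("somesy.toml"))
#   inp.is_somesy_file()   # -> True (origin path ends with "somesy.toml")
#   inp.project.name       # project metadata parsed into ProjectMetadata
#   inp.config             # SomesyConfig with defaults, unless the file sets tool options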