Coverage for src/somesy/core/models.py: 93%
316 statements
coverage.py v7.6.0, created at 2025-03-10 14:56 +0000
1"""Core models for the somesy package."""
3from __future__ import annotations
5import functools
6import json
7import re
8from datetime import date
9from pathlib import Path
10from typing import Any, Dict, List, Optional, Union
12from pydantic import (
13 BaseModel,
14 Field,
15 PrivateAttr,
16 field_validator,
17 model_validator,
18)
19from rich.pretty import pretty_repr
20from typing_extensions import Annotated
22from .core import get_input_content
23from .log import SomesyLogLevel
24from .types import ContributionTypeEnum, Country, HttpUrlStr, LicenseEnum
26# --------
27# Somesy configuration model
30class SomesyBaseModel(BaseModel):
31 """Customized pydantic BaseModel for somesy.
33 Apart from some general tweaks for better defaults,
34 adds a private `_key_order` field, which is used to track the
35 preferred order for serialization (usually coming from some existing input).
37 It can be set on an instance using the set_key_order method,
38 and is preserved by `copy()`.
40 NOTE: The custom order is intended for leaf models (no further nested models),
41 custom order will not work correctly across nesting layers.
42 """
44 model_config = dict(
45 extra="forbid",
46 validate_assignment=True,
47 populate_by_name=True,
48 str_strip_whitespace=True,
49 str_min_length=1,
50 )
52 # ----
53 # Key order magic
55 _key_order: List[str] = PrivateAttr([])
56 """List of field names (NOT aliases!) in the order they should be written in."""
58 @classmethod
59 @functools.lru_cache() # compute once per class
60 def _aliases(cls) -> Dict[str, str]:
61 """Map back from alias field names to internal field names."""
62 return {v.alias or k: k for k, v in cls.model_fields.items()}
64 @classmethod
65 def make_partial(cls, dct):
66 """Construct unvalidated partial model from dict.
68 Handles aliases correctly, unlike `construct`.
69 """
70 un_alias = cls._aliases()
71 return cls.model_construct(**{un_alias.get(k) or k: v for k, v in dct.items()})
73 def set_key_order(self, keys: List[str]):
74 """Setter for custom key order used in serialization."""
75 un_alias = self._aliases()
76 # make sure we use the _actual_ field names
77 self._key_order = list(map(lambda k: un_alias.get(k) or k, keys))
79 def model_copy(self, *args, **kwargs):
80 """Patched copy method (to preserve custom key order)."""
81 ret = super().model_copy(*args, **kwargs)
82 ret.set_key_order(list(self._key_order))
83 return ret
85 @staticmethod
86 def _patch_kwargs_defaults(kwargs):
87 """Set some default arguments if they are not set by kwargs."""
88 for key in ["exclude_defaults", "exclude_none"]:
89 if kwargs.get(key, None) is None:
90 kwargs[key] = True
92 def _reorder_dict(self, dct):
93 """Return dict with patched key order (according to `self._key_order`).
95 Keys in `dct` not listed in `self._key_order` come after all others.
97 Used to patch up `model_dump()` and `model_dump_json()`.
98 """
99 key_order = self._key_order or []
100 existing = set(key_order).intersection(set(dct.keys()))
101 key_order = [k for k in key_order if k in existing]
102 key_order += list(set(dct.keys()) - set(key_order))
103 return {k: dct[k] for k in key_order}
105 def model_dump(self, *args, **kwargs):
106 """Patched dict method (to preserve custom key order)."""
107 self._patch_kwargs_defaults(kwargs)
108 by_alias = kwargs.pop("by_alias", False)
110 dct = super().model_dump(*args, **kwargs, by_alias=False)
111 ret = self._reorder_dict(dct)
113 if by_alias:
114 ret = {self.model_fields[k].alias or k: v for k, v in ret.items()}
115 return ret
117 def model_dump_json(self, *args, **kwargs):
118 """Patched json method (to preserve custom key order)."""
119 self._patch_kwargs_defaults(kwargs)
120 by_alias = kwargs.pop("by_alias", False)
122 # loop back json through dict to apply custom key order
123 dct = json.loads(super().model_dump_json(*args, **kwargs, by_alias=False))
124 ret = self._reorder_dict(dct)
126 if by_alias:
127 ret = {self.model_fields[k].alias or k: v for k, v in ret.items()}
128 return json.dumps(ret, ensure_ascii=False)
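
# --- Illustrative sketch (added for this report, not part of the module) ---
# The key-order machinery above keeps serialization stable with respect to an
# existing input file. Assuming a hypothetical leaf model `Point`:
#
#   class Point(SomesyBaseModel):
#       x: Optional[int] = None
#       y: Optional[int] = None
#
#   p = Point(x=1, y=2)
#   p.set_key_order(["y", "x"])
#   p.model_dump()               # -> {"y": 2, "x": 1}  (custom order preserved)
#   p.model_copy().model_dump()  # a copy keeps the same key order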

_SOMESY_TARGETS = [
    "cff",
    "pyproject",
    "package_json",
    "codemeta",
    "julia",
    "fortran",
    "pom_xml",
    "mkdocs",
    "rust",
]


class SomesyConfig(SomesyBaseModel):
    """Pydantic model for somesy tool configuration.

    Note that all fields match CLI options, and CLI options will override the
    values declared in a somesy input file (such as `somesy.toml`).
    """

    @model_validator(mode="before")
    @classmethod
    def at_least_one_target(cls, values):
        """Check that at least one output file is enabled."""
        if all(map(lambda x: values.get(f"no_sync_{x}"), _SOMESY_TARGETS)):
            msg = "No sync target enabled, nothing to do. Probably this is a mistake?"
            raise ValueError(msg)
        return values

    # cli flags
    show_info: Annotated[
        bool,
        Field(
            description="Show basic information messages on run (-v flag).",
        ),
    ] = False
    verbose: Annotated[
        bool, Field(description="Show verbose messages on run (-vv flag).")
    ] = False
    debug: Annotated[
        bool, Field(description="Show debug messages on run (-vvv flag).")
    ] = False

    input_file: Annotated[
        Optional[Path], Field(description="Project metadata input file path.")
    ] = Path("somesy.toml")

    no_sync_pyproject: Annotated[
        bool, Field(description="Do not sync with pyproject.toml.")
    ] = False
    pyproject_file: Annotated[
        Union[Path, List[Path]], Field(description="pyproject.toml file path.")
    ] = Path("pyproject.toml")

    no_sync_package_json: Annotated[
        bool, Field(description="Do not sync with package.json.")
    ] = False
    package_json_file: Annotated[
        Union[Path, List[Path]], Field(description="package.json file path.")
    ] = Path("package.json")

    no_sync_julia: Annotated[
        bool, Field(description="Do not sync with Project.toml.")
    ] = False
    julia_file: Annotated[
        Union[Path, List[Path]], Field(description="Project.toml file path.")
    ] = Path("Project.toml")

    no_sync_fortran: Annotated[
        bool, Field(description="Do not sync with fpm.toml.")
    ] = False
    fortran_file: Annotated[
        Union[Path, List[Path]], Field(description="fpm.toml file path.")
    ] = Path("fpm.toml")

    no_sync_pom_xml: Annotated[bool, Field(description="Do not sync with pom.xml.")] = (
        False
    )
    pom_xml_file: Annotated[
        Union[Path, List[Path]], Field(description="pom.xml file path.")
    ] = Path("pom.xml")

    no_sync_mkdocs: Annotated[
        bool, Field(description="Do not sync with mkdocs.yml.")
    ] = False
    mkdocs_file: Annotated[
        Union[Path, List[Path]], Field(description="mkdocs.yml file path.")
    ] = Path("mkdocs.yml")

    no_sync_rust: Annotated[bool, Field(description="Do not sync with Cargo.toml.")] = (
        False
    )
    rust_file: Annotated[
        Union[Path, List[Path]], Field(description="Cargo.toml file path.")
    ] = Path("Cargo.toml")

    no_sync_cff: Annotated[bool, Field(description="Do not sync with CFF.")] = False
    cff_file: Annotated[
        Union[Path, List[Path]], Field(description="CFF file path.")
    ] = Path("CITATION.cff")

    no_sync_codemeta: Annotated[
        bool, Field(description="Do not sync with codemeta.json.")
    ] = False
    codemeta_file: Annotated[
        Union[Path, List[Path]], Field(description="codemeta.json file path.")
    ] = Path("codemeta.json")
    merge_codemeta: Annotated[
        bool,
        Field(
            description="Merge codemeta.json with an existing codemeta.json file."
        ),
    ] = False

    # property to pass validation for all inputs/outputs
    pass_validation: Annotated[
        Optional[bool],
        Field(description="Pass validation for all output files."),
    ] = False

    # packages (sub-folders) for monorepos with their own somesy config
    packages: Annotated[
        Optional[Union[Path, List[Path]]],
        Field(
            description="Packages (sub-folders) for monorepos with their own somesy config."
        ),
    ] = None

    def log_level(self) -> SomesyLogLevel:
        """Return log level derived from this configuration."""
        return SomesyLogLevel.from_flags(
            info=self.show_info, verbose=self.verbose, debug=self.debug
        )

    def update_log_level(self, log_level: SomesyLogLevel):
        """Update config flags according to passed log level."""
        self.show_info = log_level == SomesyLogLevel.INFO
        self.verbose = log_level == SomesyLogLevel.VERBOSE
        self.debug = log_level == SomesyLogLevel.DEBUG
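
    # --- Illustrative sketch (added for this report, not part of the module) ---
    # The verbosity flags map onto a single SomesyLogLevel value, e.g.:
    #
    #   cfg = SomesyConfig(verbose=True)
    #   cfg.log_level()                             # e.g. SomesyLogLevel.VERBOSE
    #   cfg.update_log_level(SomesyLogLevel.DEBUG)  # sets debug=True, other flags False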

    def get_input(self) -> SomesyInput:
        """Based on the somesy config, load the complete somesy input."""
        # get metadata+config from specified input file
        somesy_input = SomesyInput.from_input_file(self.input_file)
        # update input with merged config settings (cli overrides config file)
        dct: Dict[str, Any] = {}
        dct.update(somesy_input.config or {})
        dct.update(self.model_dump())
        somesy_input.config = SomesyConfig(**dct)
        return somesy_input

    def resolve_paths(self, base_dir: Path) -> None:
        """Resolve all paths in the config relative to the given base directory.

        Args:
            base_dir: The base directory to resolve paths against.
        """

        def resolve_path(
            paths: Optional[Union[Path, List[Path]]],
        ) -> Optional[Union[Path, List[Path]]]:
            if paths is None:
                return None
            if isinstance(paths, list):
                return [base_dir / p for p in paths]
            return base_dir / paths

        # Resolve all file paths
        resolved_input = resolve_path(self.input_file)
        self.input_file = resolved_input if isinstance(resolved_input, Path) else None
        self.pyproject_file = resolve_path(self.pyproject_file)
        self.package_json_file = resolve_path(self.package_json_file)
        self.julia_file = resolve_path(self.julia_file)
        self.fortran_file = resolve_path(self.fortran_file)
        self.pom_xml_file = resolve_path(self.pom_xml_file)
        self.mkdocs_file = resolve_path(self.mkdocs_file)
        self.rust_file = resolve_path(self.rust_file)
        self.cff_file = resolve_path(self.cff_file)
        self.codemeta_file = resolve_path(self.codemeta_file)
        self.packages = resolve_path(self.packages)
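
# --- Illustrative sketch (added for this report, not part of the module) ---
# resolve_paths() rebases every configured file path onto a base directory,
# e.g. when descending into monorepo sub-packages. The paths here are hypothetical:
#
#   cfg = SomesyConfig(cff_file=Path("CITATION.cff"))
#   cfg.resolve_paths(Path("packages/my-lib"))
#   cfg.cff_file    # -> Path("packages/my-lib/CITATION.cff")
#   cfg.input_file  # -> Path("packages/my-lib/somesy.toml")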


# --------
# Project metadata model (modified from CITATION.cff)


class ContributorBaseModel(SomesyBaseModel):
    """Base model for Person and Entity models.

    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    email: Annotated[
        Optional[str],
        Field(
            pattern=r"^[\S]+@[\S]+\.[\S]{2,}$",
            description="The person's email address.",
        ),
    ] = None

    alias: Annotated[Optional[str], Field(description="The contributor's alias.")] = (
        None
    )
    address: Annotated[
        Optional[str], Field(description="The contributor's address.")
    ] = None
    city: Annotated[Optional[str], Field(description="The entity's city.")] = None
    country: Annotated[
        Optional[Country], Field(description="The entity's country.")
    ] = None
    fax: Annotated[Optional[str], Field(description="The person's fax number.")] = None
    post_code: Annotated[
        Optional[str], Field(alias="post-code", description="The entity's post-code.")
    ] = None
    region: Annotated[Optional[str], Field(description="The entity's region.")] = None
    tel: Annotated[Optional[str], Field(description="The entity's phone number.")] = (
        None
    )

    # ----
    # somesy-specific extensions

    author: Annotated[
        bool,
        Field(
            description="Indicates whether the entity is an author of the project (i.e. significant contributor)."
        ),
    ] = False
    publication_author: Annotated[
        Optional[bool],
        Field(
            description="Indicates whether the entity is to be listed as an author in academic citations."
        ),
    ] = None
    maintainer: Annotated[
        bool,
        Field(
            description="Indicates whether the entity is a maintainer of the project (i.e. for contact)."
        ),
    ] = False

    # NOTE: CFF 1.3 (once done) might provide ways for refined contributor description. That should be implemented here.
    contribution: Annotated[
        Optional[str],
        Field(description="Summary of how the entity contributed to the project."),
    ] = None
    contribution_types: Annotated[
        Optional[List[ContributionTypeEnum]],
        Field(
            description="Relevant types of contributions (see https://allcontributors.org/docs/de/emoji-key).",
            min_length=1,
        ),
    ] = None
    contribution_begin: Annotated[
        Optional[date], Field(description="Beginning date of the contribution.")
    ] = None
    contribution_end: Annotated[
        Optional[date], Field(description="Ending date of the contribution.")
    ] = None

    @model_validator(mode="before")
    @classmethod
    def author_implies_publication(cls, values):
        """Ensure consistency of author and publication_author."""
        if values.get("author"):
            # NOTE: explicitly check for False (different case from None = missing!)
            if values.get("publication_author") is False:
                msg = "Combining author=true and publication_author=false is invalid!"
                raise ValueError(msg)
            values["publication_author"] = True
        return values
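
    # --- Illustrative sketch (added for this report, not part of the module) ---
    # The validator above promotes authors to publication authors unless that is
    # explicitly contradicted, e.g. (using the concrete Person subclass defined below):
    #
    #   Person(**{"given-names": "Jane", "family-names": "Doe", "author": True})
    #       # -> publication_author is normalized to True
    #   Person(**{"given-names": "Jane", "family-names": "Doe",
    #             "author": True, "publication_author": False})
    #       # -> raises a validation error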

    # helper methods
    @property
    def full_name(self) -> str:
        """Return the name of the contributor."""
        pass

    def to_name_email_string(self) -> str:
        """Convert the contributor to a string of the form `full name <x@y.z>` (as used e.g. by poetry)."""
        if self.email:
            return f"{self.full_name} <{self.email}>"
        else:
            return self.full_name

    @classmethod
    def from_name_email_string(cls, person: str):
        """Return an instance of this class based on a name/e-mail string like `full name <x@y.z>`.

        If the name is `A B C`, then `A B` will be the given names and `C` will be the family name.
        """
        pass


class Entity(ContributorBaseModel):
    """Metadata about an entity in the context of software project ownership.

    An entity, i.e., an institution, team, research group, company, conference, etc., as opposed to a single natural person.
    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    # NOTE: we rely on the defined aliases for direct CITATION.cff interoperability.

    date_end: Annotated[
        Optional[date],
        Field(
            alias="date-end",
            description="The entity's ending date, e.g., when the entity is a conference.",
        ),
    ] = None
    date_start: Annotated[
        Optional[date],
        Field(
            alias="date-start",
            description="The entity's starting date, e.g., when the entity is a conference.",
        ),
    ] = None
    location: Annotated[
        Optional[str],
        Field(
            description="The entity's location, e.g., when the entity is a conference."
        ),
    ] = None
    name: Annotated[str, Field(description="The entity's name.")]
    website: Annotated[
        Optional[HttpUrlStr], Field(description="The entity's website.")
    ] = None
    rorid: Annotated[
        Optional[HttpUrlStr],
        Field(
            description="The entity's ROR ID url **(not required, but highly suggested)**."
        ),
    ] = None

    # helper methods
    @property
    def full_name(self) -> str:
        """Use same property as Person for code integration."""
        return self.name

    @classmethod
    def from_name_email_string(cls, entity: str) -> Entity:
        """Return an `Entity` based on a name/e-mail string like `name <x@y.z>`."""
        m = re.match(r"\s*([^<]+)<([^>]+)>", entity)
        if m is None:
            return Entity(**{"name": entity})

        name, mail = (
            m.group(1).strip(),
            m.group(2).strip(),
        )
        return Entity(
            **{
                "name": name,
                "email": mail,
            }
        )

    def same_person(self, other: Entity) -> bool:
        """Return whether two Entity metadata records are about the same real entity.

        Uses a heuristic match based on ROR ID, website, email and name (whichever are provided).
        """
        if not isinstance(other, Entity):
            return False
        if self.rorid is not None and other.rorid is not None:
            if self.rorid == other.rorid:
                return True
        if self.website is not None and other.website is not None:
            if self.website == other.website:
                return True
        if self.email is not None and other.email is not None:
            if self.email == other.email:
                return True
        return self.name == other.name

    def model_dump_json(self, *args, **kwargs):
        """Patched json method (to preserve custom key order); removes `rorid` and uses it as `website` if no website is set."""
        ret = super().model_dump_json(*args, **kwargs)
        # convert ret to dict
        ret = json.loads(ret)
        if self.rorid is not None and "website" not in ret:
            ret["website"] = str(self.rorid)
            ret.pop("rorid")
        # convert ret back to json string
        return json.dumps(ret)
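
# --- Illustrative sketch (added for this report, not part of the module) ---
# Entities can be given compactly in `name <email>` form and round-trip back
# (the name and address below are made up):
#
#   e = Entity.from_name_email_string("Research Software Group <rse@example.org>")
#   e.name                    # -> "Research Software Group"
#   e.to_name_email_string()  # -> "Research Software Group <rse@example.org>"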


class Person(ContributorBaseModel):
    """Metadata about a person in the context of a software project.

    This schema is based on CITATION.cff 1.2, modified and extended for the needs of somesy.
    """

    # NOTE: we rely on the defined aliases for direct CITATION.cff interoperability.

    orcid: Annotated[
        Optional[HttpUrlStr],
        Field(
            description="The person's ORCID url **(not required, but highly suggested)**."
        ),
    ] = None
    family_names: Annotated[
        str, Field(alias="family-names", description="The person's family names.")
    ]
    given_names: Annotated[
        str, Field(alias="given-names", description="The person's given names.")
    ]
    name_particle: Annotated[
        Optional[str],
        Field(
            alias="name-particle",
            description="The person's name particle, e.g., a nobiliary particle or a preposition meaning 'of' or 'from'"
            " (for example 'von' in 'Alexander von Humboldt').",
            examples=["von"],
        ),
    ] = None
    name_suffix: Annotated[
        Optional[str],
        Field(
            alias="name-suffix",
            description="The person's name-suffix, e.g. 'Jr.' for Sammy Davis Jr. or 'III' for Frank Edwin Wright III.",
            examples=["Jr.", "III"],
        ),
    ] = None
    affiliation: Annotated[
        Optional[str], Field(description="The person's affiliation.")
    ] = None

    # helper methods

    @property
    def full_name(self) -> str:
        """Return the full name of the person."""
        names = []

        if self.given_names:
            names.append(self.given_names)

        if self.name_particle:
            names.append(self.name_particle)

        if self.family_names:
            names.append(self.family_names)

        if self.name_suffix:
            names.append(self.name_suffix)

        return " ".join(names) if names else ""

    @classmethod
    def from_name_email_string(cls, person: str) -> Person:
        """Return a `Person` based on a name/e-mail string like `full name <x@y.z>`.

        If the name is `A B C`, then `A B` will be the given names and `C` will be the family name.
        """
        m = re.match(r"\s*([^<]+)<([^>]+)>", person)
        if m is None:
            names = list(map(lambda s: s.strip(), person.split()))
            return Person(
                **{
                    "given-names": " ".join(names[:-1]),
                    "family-names": names[-1],
                }
            )
        names, mail = (
            list(map(lambda s: s.strip(), m.group(1).split())),
            m.group(2).strip(),
        )
        # NOTE: for our purposes, it does not matter which are given or family names,
        # we only compare on full_name anyway.
        return Person(
            **{
                "given-names": " ".join(names[:-1]),
                "family-names": names[-1],
                "email": mail,
            }
        )

    def same_person(self, other) -> bool:
        """Return whether two Person metadata records are about the same real person.

        Uses heuristic match based on orcid, email and name (whichever are provided).
        """
        if not isinstance(other, Person):
            return False
        if self.orcid is not None and other.orcid is not None:
            # having orcids is the best case, a real identifier
            # NOTE: converting to str from pydantic-internal Url object for == !
            return str(self.orcid) == str(other.orcid)

        # otherwise, try to match according to mail/name
        # sourcery skip: merge-nested-ifs
        if self.email is not None and other.email is not None:
            if self.email == other.email:
                # an email address belongs to exactly one person
                # => same email -> same person
                return True
            # otherwise, need to check name
            # (a person often has multiple email addresses)

        # no orcids, no/distinct email address
        # -> decide based on full_name (which is always present)
        return self.full_name == other.full_name
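
# --- Illustrative sketch (added for this report, not part of the module) ---
# Name parsing and the identity heuristic in combination (names and addresses
# below are made up):
#
#   p1 = Person.from_name_email_string("Jane Q. Doe <jane@example.org>")
#   p1.given_names, p1.family_names   # -> ("Jane Q.", "Doe")
#   p1.full_name                      # -> "Jane Q. Doe"
#
#   p2 = Person.from_name_email_string("Jane Q. Doe <jqd@other.example>")
#   p1.same_person(p2)                # -> True (same full name, emails differ)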


class ProjectMetadata(SomesyBaseModel):
    """Pydantic model for Project Metadata Input."""

    model_config = dict(extra="ignore")

    @field_validator("people")
    @classmethod
    def ensure_distinct_people(cls, people):
        """Make sure that no person is listed twice in the same list."""
        for i in range(len(people)):
            for j in range(i + 1, len(people)):
                if people[i].same_person(people[j]):
                    p1 = pretty_repr(json.loads(people[i].model_dump_json()))
                    p2 = pretty_repr(json.loads(people[j].model_dump_json()))
                    msg = f"Same person is listed twice:\n{p1}\n{p2}"
                    raise ValueError(msg)
        return people

    @field_validator("entities")
    @classmethod
    def ensure_distinct_entities(cls, entities):
        """Make sure that no entity is listed twice in the same list."""
        for i in range(len(entities)):
            for j in range(i + 1, len(entities)):
                if entities[i].same_person(entities[j]):
                    e1 = pretty_repr(json.loads(entities[i].model_dump_json()))
                    e2 = pretty_repr(json.loads(entities[j].model_dump_json()))
                    msg = f"Same entity is listed twice:\n{e1}\n{e2}"
                    raise ValueError(msg)
        return entities

    @model_validator(mode="after")
    def at_least_one_author(self) -> ProjectMetadata:
        """Make sure there is at least one author."""
        if not self.people and not self.entities:
            raise ValueError(
                "There has to be at least one person or organization in the input."
            )
        if not any(map(lambda p: p.author, self.people)) and not any(
            map(lambda e: e.author, self.entities)
        ):
            raise ValueError(
                "At least one person or entity must be an author of this project."
            )
        return self

    name: Annotated[str, Field(description="Project name.")]
    description: Annotated[str, Field(description="Project description.")]
    version: Annotated[Optional[str], Field(description="Project version.")] = None
    license: Annotated[LicenseEnum, Field(description="SPDX License string.")]

    homepage: Annotated[
        Optional[HttpUrlStr], Field(description="URL of the project homepage.")
    ] = None
    repository: Annotated[
        Optional[HttpUrlStr],
        Field(description="URL of the project source code repository."),
    ] = None
    documentation: Annotated[
        Optional[HttpUrlStr], Field(description="URL of the project documentation.")
    ] = None

    keywords: Annotated[
        Optional[List[str]],
        Field(min_length=1, description="Keywords that describe the project."),
    ] = None

    people: Annotated[
        Optional[List[Person]],
        Field(
            description="Project authors, maintainers and contributors.",
            default_factory=list,
        ),
    ]

    entities: Annotated[
        Optional[List[Entity]],
        Field(
            description="Project authors, maintainers and contributors as entities (organizations).",
            default_factory=list,
        ),
    ]

    def authors(self):
        """Return people and entities explicitly marked as authors."""
        authors = [p for p in self.people if p.author]
        authors.extend([e for e in self.entities if e.author])
        return authors

    def publication_authors(self):
        """Return people and entities marked as publication authors.

        This always includes people marked as authors.
        """
        # return an empty list if no publication authors are specified
        if not any(map(lambda p: p.publication_author, self.people)) and not any(
            map(lambda p: p.publication_author, self.entities)
        ):
            return []
        publication_authors = [p for p in self.people if p.publication_author]
        publication_authors.extend([e for e in self.entities if e.publication_author])
        return publication_authors

    def maintainers(self):
        """Return people and entities marked as maintainers."""
        maintainers = [p for p in self.people if p.maintainer]
        maintainers.extend([e for e in self.entities if e.maintainer])
        return maintainers

    def contributors(self):
        """Return only people and entities not marked as authors."""
        contributors = [p for p in self.people if not p.author]
        contributors.extend([e for e in self.entities if not e.author])
        return contributors
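
# --- Illustrative sketch (added for this report, not part of the module) ---
# The role helpers simply filter `people` and `entities` by their flags.
# Assuming "MIT" is a valid LicenseEnum value and using made-up metadata:
#
#   meta = ProjectMetadata(
#       name="my-project",
#       description="Example project.",
#       license="MIT",
#       people=[Person(**{"given-names": "Jane", "family-names": "Doe",
#                         "author": True, "maintainer": True})],
#   )
#   [p.full_name for p in meta.authors()]   # -> ["Jane Doe"]
#   meta.contributors()                     # -> [] (Jane is marked as an author)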


class SomesyInput(SomesyBaseModel):
    """The complete somesy input file (`somesy.toml`) or section (`pyproject.toml`)."""

    _origin: Optional[Path]

    project: Annotated[
        ProjectMetadata,
        Field(description="Project metadata to be used and synchronized."),
    ]
    config: Annotated[
        Optional[SomesyConfig],
        Field(
            description="somesy tool configuration (matches CLI flags).",
            default_factory=lambda: SomesyConfig(),
        ),
    ]

    # if config.input_file is set, use it as origin
    @model_validator(mode="after")
    def set_origin(self):
        """Set the origin of the input file."""
        if self.config and self.config.input_file:
            self._origin = self.config.input_file
        return self

    def is_somesy_file(self) -> bool:
        """Return whether this somesy input is from a somesy config file.

        That means it returns False if the input is from e.g. pyproject.toml or package.json.
        """
        return self.is_somesy_file_path(self._origin or Path("."))

    @classmethod
    def is_somesy_file_path(cls, path: Path) -> bool:
        """Return whether the path looks like a somesy config file.

        That means it returns False for e.g. pyproject.toml or package.json.
        """
        return str(path).endswith("somesy.toml")

    @classmethod
    def from_input_file(cls, path: Path) -> SomesyInput:
        """Load somesy input from the given file."""
        content = get_input_content(path)
        ret = SomesyInput(**content)
        ret._origin = path
        return ret
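
# --- Illustrative sketch (added for this report, not part of the module) ---
# Typical entry point: load a somesy input file and inspect its parts. The path
# is hypothetical; get_input_content() is assumed to parse the supported input
# file formats (e.g. somesy.toml or a somesy section in pyproject.toml):
#
#   somesy_input = SomesyInput.from_input_file(Path("somesy.toml"))
#   somesy_input.project.name         # project metadata section
#   somesy_input.config.no_sync_cff   # tool configuration (CLI defaults apply)
#   somesy_input.is_somesy_file()     # -> True for a somesy.toml origin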